diff --git a/README.rst b/README.rst index 32e4dc55..ca7011cd 100644 --- a/README.rst +++ b/README.rst @@ -1,7 +1,7 @@ Python Client for Google Kubernetes Engine API ============================================== -|ga| |pypi| |versions| +|ga| |pypi| |versions| `Google Kubernetes Engine API`_: The Google Kubernetes Engine API is used for building and managing container based applications, powered by the open source @@ -49,11 +49,13 @@ dependencies. Supported Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^ -Python >= 3.5 +Python >= 3.6 Deprecated Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python == 2.7. Python 2.7 support will be removed on January 1, 2020. +Python == 2.7. + +The last version of this library compatible with Python 2.7 is google-cloud-container==1.0.1 Mac/Linux diff --git a/UPGRADING.md b/UPGRADING.md new file mode 100644 index 00000000..b96d89ff --- /dev/null +++ b/UPGRADING.md @@ -0,0 +1,154 @@ +# 2.0.0 Migration Guide + +The 2.0 release of the `google-cloud-container` client is a significant upgrade based on a [next-gen code generator](https://github.com/googleapis/gapic-generator-python), and includes substantial interface changes. Existing code written for earlier versions of this library will likely require updates to use this version. This document describes the changes that have been made, and what you need to do to update your usage. + +If you experience issues or have questions, please file an [issue](https://github.com/googleapis/python-container/issues). + +## Supported Python Versions + +> **WARNING**: Breaking change + +The 2.0.0 release requires Python 3.6+. + + +## Method Calls + +> **WARNING**: Breaking change + +Methods expect request objects. We provide a script that will convert most common use cases. + +* Install the library + +```py +python3 -m pip install google-cloud-container +``` + +* The script `fixup_container_v1_keywords.py` and `fixup_container_v1beta1_keywords.py` +are shipped with the library. 
It expects an input directory (with the code to convert) and an empty destination directory. + +```sh +$ fixup_container_v1_keywords.py --input-directory .samples/ --output-directory samples/ +``` + +**Before:** +```py +from google.cloud import container_v1 + +client = container_v1.ClusterManagerClient() + +clusters = client.list_clusters( + project_id="project_id", zone="us-central1-a", parent="parent" +) +``` + + +**After:** +```py +from google.cloud import container_v1 + +client = container_v1.ClusterManagerClient() + +clusters = client.list_clusters( + request = {'project_id': "project_id", 'zone': "us-central1-a", 'parent': "parent"} +) +``` + +### More Details + +In `google-cloud-container<2.0.0`, parameters required by the API were positional parameters and optional parameters were keyword parameters. + +**Before:** +```py + def list_clusters( + self, + project_id=None, + zone=None, + parent=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): +``` + +In the 2.0.0 release, all methods have a single positional parameter `request`. Method docstrings indicate whether a parameter is required or optional. + +Some methods have additional keyword only parameters. The available parameters depend on the [`google.api.method_signature` annotation](https://github.com/googleapis/googleapis/blob/master/google/container/v1/cluster_service.proto#L48) specified by the API producer. + + +**After:** +```py + def list_clusters( + self, + request: cluster_service.ListClustersRequest = None, + *, + project_id: str = None, + zone: str = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListClustersResponse: +``` + +> **NOTE:** The `request` parameter and flattened keyword parameters for the API are mutually exclusive. +> Passing both will result in an error. 
+ + +Both of these calls are valid: + +```py +response = client.list_clusters( + request={ + "project_id": project_id, + "zone": zone, + "parent": parent, + } +) +``` + +```py +response = client.list_clusters( + project_id=project_id, + zone=zone, + parent=parent, +) +``` + +This call is invalid because it mixes `request` with a keyword argument `parent`. Executing this code will result in an error. + +```py +response = client.list_clusters( + request={ + "project_id": project_id, + "zone": zone, + }, + parent=parent +) +``` + + + +## Enums and Types + + +> **WARNING**: Breaking change + +The submodules `enums` and `types` have been removed. + +**Before:** +```py + +from google.cloud import container + +status = container.enums.Cluster.Status.RUNNING +cluster = container.types.Cluster(name="name") +``` + + +**After:** +```py +from google.cloud import container + +status = container.Cluster.Status.RUNNING +cluster = container.Cluster(name="name") +``` diff --git a/docs/UPGRADING.md b/docs/UPGRADING.md new file mode 120000 index 00000000..01097c8c --- /dev/null +++ b/docs/UPGRADING.md @@ -0,0 +1 @@ +../UPGRADING.md \ No newline at end of file diff --git a/docs/container_v1/services.rst b/docs/container_v1/services.rst new file mode 100644 index 00000000..a23224df --- /dev/null +++ b/docs/container_v1/services.rst @@ -0,0 +1,6 @@ +Services for Google Container v1 API +==================================== + +.. automodule:: google.cloud.container_v1.services.cluster_manager + :members: + :inherited-members: diff --git a/docs/container_v1/types.rst b/docs/container_v1/types.rst new file mode 100644 index 00000000..6b9225d6 --- /dev/null +++ b/docs/container_v1/types.rst @@ -0,0 +1,5 @@ +Types for Google Container v1 API +================================= + +.. 
automodule:: google.cloud.container_v1.types + :members: diff --git a/docs/container_v1beta1/services.rst b/docs/container_v1beta1/services.rst new file mode 100644 index 00000000..e4b6a994 --- /dev/null +++ b/docs/container_v1beta1/services.rst @@ -0,0 +1,6 @@ +Services for Google Container v1beta1 API +========================================= + +.. automodule:: google.cloud.container_v1beta1.services.cluster_manager + :members: + :inherited-members: diff --git a/docs/container_v1beta1/types.rst b/docs/container_v1beta1/types.rst new file mode 100644 index 00000000..8c258957 --- /dev/null +++ b/docs/container_v1beta1/types.rst @@ -0,0 +1,5 @@ +Types for Google Container v1beta1 API +====================================== + +.. automodule:: google.cloud.container_v1beta1.types + :members: diff --git a/docs/gapic/v1/api.rst b/docs/gapic/v1/api.rst deleted file mode 100644 index 62435f64..00000000 --- a/docs/gapic/v1/api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Client for Google Container Engine API -====================================== - -.. automodule:: google.cloud.container_v1 - :members: - :inherited-members: \ No newline at end of file diff --git a/docs/gapic/v1/types.rst b/docs/gapic/v1/types.rst deleted file mode 100644 index 7b1c1f95..00000000 --- a/docs/gapic/v1/types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Types for Google Container Engine API Client -============================================ - -.. automodule:: google.cloud.container_v1.types - :members: \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index e6e45813..b3f611dd 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,11 +4,33 @@ API Reference ------------- + +v1 +------------- .. toctree:: - :maxdepth: 2 + :maxdepth: 2 + + container_v1/services + container_v1/types + +v1beta1 +------------- +.. 
toctree:: + :maxdepth: 2 + + container_v1beta1/services + container_v1beta1/types + +Migration Guide +--------------- + +See the guide below for instructions on migrating to the 2.x release of this library. + +.. toctree:: + :maxdepth: 2 + + UPGRADING - gapic/v1/api - gapic/v1/types Changelog --------- diff --git a/google/__init__.py b/google/__init__.py deleted file mode 100644 index 2f4b4738..00000000 --- a/google/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/google/cloud/__init__.py b/google/cloud/__init__.py deleted file mode 100644 index 2f4b4738..00000000 --- a/google/cloud/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/google/cloud/container/__init__.py b/google/cloud/container/__init__.py new file mode 100644 index 00000000..216ce9b3 --- /dev/null +++ b/google/cloud/container/__init__.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.container_v1.services.cluster_manager.async_client import ( + ClusterManagerAsyncClient, +) +from google.cloud.container_v1.services.cluster_manager.client import ( + ClusterManagerClient, +) +from google.cloud.container_v1.types.cluster_service import AcceleratorConfig +from google.cloud.container_v1.types.cluster_service import AddonsConfig +from google.cloud.container_v1.types.cluster_service import AuthenticatorGroupsConfig +from google.cloud.container_v1.types.cluster_service import AutoUpgradeOptions +from google.cloud.container_v1.types.cluster_service import ( + AutoprovisioningNodePoolDefaults, +) +from google.cloud.container_v1.types.cluster_service import BinaryAuthorization +from google.cloud.container_v1.types.cluster_service import CancelOperationRequest +from google.cloud.container_v1.types.cluster_service import ClientCertificateConfig +from google.cloud.container_v1.types.cluster_service import CloudRunConfig +from google.cloud.container_v1.types.cluster_service import Cluster +from google.cloud.container_v1.types.cluster_service import ClusterAutoscaling +from google.cloud.container_v1.types.cluster_service import ClusterUpdate +from google.cloud.container_v1.types.cluster_service import CompleteIPRotationRequest +from google.cloud.container_v1.types.cluster_service import CreateClusterRequest +from google.cloud.container_v1.types.cluster_service import CreateNodePoolRequest +from google.cloud.container_v1.types.cluster_service import DailyMaintenanceWindow +from google.cloud.container_v1.types.cluster_service import DatabaseEncryption +from google.cloud.container_v1.types.cluster_service import DeleteClusterRequest +from google.cloud.container_v1.types.cluster_service import DeleteNodePoolRequest +from google.cloud.container_v1.types.cluster_service import GetClusterRequest +from google.cloud.container_v1.types.cluster_service import GetNodePoolRequest +from google.cloud.container_v1.types.cluster_service import 
GetOperationRequest +from google.cloud.container_v1.types.cluster_service import GetServerConfigRequest +from google.cloud.container_v1.types.cluster_service import HorizontalPodAutoscaling +from google.cloud.container_v1.types.cluster_service import HttpLoadBalancing +from google.cloud.container_v1.types.cluster_service import IPAllocationPolicy +from google.cloud.container_v1.types.cluster_service import IntraNodeVisibilityConfig +from google.cloud.container_v1.types.cluster_service import KubernetesDashboard +from google.cloud.container_v1.types.cluster_service import LegacyAbac +from google.cloud.container_v1.types.cluster_service import ListClustersRequest +from google.cloud.container_v1.types.cluster_service import ListClustersResponse +from google.cloud.container_v1.types.cluster_service import ListNodePoolsRequest +from google.cloud.container_v1.types.cluster_service import ListNodePoolsResponse +from google.cloud.container_v1.types.cluster_service import ListOperationsRequest +from google.cloud.container_v1.types.cluster_service import ListOperationsResponse +from google.cloud.container_v1.types.cluster_service import ListUsableSubnetworksRequest +from google.cloud.container_v1.types.cluster_service import ( + ListUsableSubnetworksResponse, +) +from google.cloud.container_v1.types.cluster_service import MaintenancePolicy +from google.cloud.container_v1.types.cluster_service import MaintenanceWindow +from google.cloud.container_v1.types.cluster_service import MasterAuth +from google.cloud.container_v1.types.cluster_service import ( + MasterAuthorizedNetworksConfig, +) +from google.cloud.container_v1.types.cluster_service import MaxPodsConstraint +from google.cloud.container_v1.types.cluster_service import NetworkConfig +from google.cloud.container_v1.types.cluster_service import NetworkPolicy +from google.cloud.container_v1.types.cluster_service import NetworkPolicyConfig +from google.cloud.container_v1.types.cluster_service import NodeConfig +from 
google.cloud.container_v1.types.cluster_service import NodeManagement +from google.cloud.container_v1.types.cluster_service import NodePool +from google.cloud.container_v1.types.cluster_service import NodePoolAutoscaling +from google.cloud.container_v1.types.cluster_service import NodeTaint +from google.cloud.container_v1.types.cluster_service import Operation +from google.cloud.container_v1.types.cluster_service import PrivateClusterConfig +from google.cloud.container_v1.types.cluster_service import RecurringTimeWindow +from google.cloud.container_v1.types.cluster_service import ResourceLimit +from google.cloud.container_v1.types.cluster_service import ResourceUsageExportConfig +from google.cloud.container_v1.types.cluster_service import ( + RollbackNodePoolUpgradeRequest, +) +from google.cloud.container_v1.types.cluster_service import ServerConfig +from google.cloud.container_v1.types.cluster_service import SetAddonsConfigRequest +from google.cloud.container_v1.types.cluster_service import SetLabelsRequest +from google.cloud.container_v1.types.cluster_service import SetLegacyAbacRequest +from google.cloud.container_v1.types.cluster_service import SetLocationsRequest +from google.cloud.container_v1.types.cluster_service import SetLoggingServiceRequest +from google.cloud.container_v1.types.cluster_service import SetMaintenancePolicyRequest +from google.cloud.container_v1.types.cluster_service import SetMasterAuthRequest +from google.cloud.container_v1.types.cluster_service import SetMonitoringServiceRequest +from google.cloud.container_v1.types.cluster_service import SetNetworkPolicyRequest +from google.cloud.container_v1.types.cluster_service import ( + SetNodePoolAutoscalingRequest, +) +from google.cloud.container_v1.types.cluster_service import SetNodePoolManagementRequest +from google.cloud.container_v1.types.cluster_service import SetNodePoolSizeRequest +from google.cloud.container_v1.types.cluster_service import ShieldedInstanceConfig +from 
google.cloud.container_v1.types.cluster_service import StartIPRotationRequest +from google.cloud.container_v1.types.cluster_service import StatusCondition +from google.cloud.container_v1.types.cluster_service import TimeWindow +from google.cloud.container_v1.types.cluster_service import UpdateClusterRequest +from google.cloud.container_v1.types.cluster_service import UpdateMasterRequest +from google.cloud.container_v1.types.cluster_service import UpdateNodePoolRequest +from google.cloud.container_v1.types.cluster_service import UsableSubnetwork +from google.cloud.container_v1.types.cluster_service import ( + UsableSubnetworkSecondaryRange, +) +from google.cloud.container_v1.types.cluster_service import VerticalPodAutoscaling + +__all__ = ( + "AcceleratorConfig", + "AddonsConfig", + "AuthenticatorGroupsConfig", + "AutoUpgradeOptions", + "AutoprovisioningNodePoolDefaults", + "BinaryAuthorization", + "CancelOperationRequest", + "ClientCertificateConfig", + "CloudRunConfig", + "Cluster", + "ClusterAutoscaling", + "ClusterManagerAsyncClient", + "ClusterManagerClient", + "ClusterUpdate", + "CompleteIPRotationRequest", + "CreateClusterRequest", + "CreateNodePoolRequest", + "DailyMaintenanceWindow", + "DatabaseEncryption", + "DeleteClusterRequest", + "DeleteNodePoolRequest", + "GetClusterRequest", + "GetNodePoolRequest", + "GetOperationRequest", + "GetServerConfigRequest", + "HorizontalPodAutoscaling", + "HttpLoadBalancing", + "IPAllocationPolicy", + "IntraNodeVisibilityConfig", + "KubernetesDashboard", + "LegacyAbac", + "ListClustersRequest", + "ListClustersResponse", + "ListNodePoolsRequest", + "ListNodePoolsResponse", + "ListOperationsRequest", + "ListOperationsResponse", + "ListUsableSubnetworksRequest", + "ListUsableSubnetworksResponse", + "MaintenancePolicy", + "MaintenanceWindow", + "MasterAuth", + "MasterAuthorizedNetworksConfig", + "MaxPodsConstraint", + "NetworkConfig", + "NetworkPolicy", + "NetworkPolicyConfig", + "NodeConfig", + "NodeManagement", + "NodePool", 
+ "NodePoolAutoscaling", + "NodeTaint", + "Operation", + "PrivateClusterConfig", + "RecurringTimeWindow", + "ResourceLimit", + "ResourceUsageExportConfig", + "RollbackNodePoolUpgradeRequest", + "ServerConfig", + "SetAddonsConfigRequest", + "SetLabelsRequest", + "SetLegacyAbacRequest", + "SetLocationsRequest", + "SetLoggingServiceRequest", + "SetMaintenancePolicyRequest", + "SetMasterAuthRequest", + "SetMonitoringServiceRequest", + "SetNetworkPolicyRequest", + "SetNodePoolAutoscalingRequest", + "SetNodePoolManagementRequest", + "SetNodePoolSizeRequest", + "ShieldedInstanceConfig", + "StartIPRotationRequest", + "StatusCondition", + "TimeWindow", + "UpdateClusterRequest", + "UpdateMasterRequest", + "UpdateNodePoolRequest", + "UsableSubnetwork", + "UsableSubnetworkSecondaryRange", + "VerticalPodAutoscaling", +) diff --git a/google/cloud/container/py.typed b/google/cloud/container/py.typed new file mode 100644 index 00000000..fd835114 --- /dev/null +++ b/google/cloud/container/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-container package uses inline types. diff --git a/google/cloud/container_v1/__init__.py b/google/cloud/container_v1/__init__.py index 740756b8..1c3fbcab 100644 --- a/google/cloud/container_v1/__init__.py +++ b/google/cloud/container_v1/__init__.py @@ -1,45 +1,181 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.container_v1 import types -from google.cloud.container_v1.gapic import cluster_manager_client -from google.cloud.container_v1.gapic import enums - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7. " - "More details about Python 2 support for Google Cloud Client Libraries " - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class ClusterManagerClient(cluster_manager_client.ClusterManagerClient): - __doc__ = cluster_manager_client.ClusterManagerClient.__doc__ - enums = enums +from .services.cluster_manager import ClusterManagerClient +from .types.cluster_service import AcceleratorConfig +from .types.cluster_service import AddonsConfig +from .types.cluster_service import AuthenticatorGroupsConfig +from .types.cluster_service import AutoUpgradeOptions +from .types.cluster_service import AutoprovisioningNodePoolDefaults +from .types.cluster_service import BinaryAuthorization +from .types.cluster_service import CancelOperationRequest +from .types.cluster_service import ClientCertificateConfig +from .types.cluster_service import CloudRunConfig +from .types.cluster_service import Cluster +from .types.cluster_service import ClusterAutoscaling +from .types.cluster_service import ClusterUpdate +from .types.cluster_service import CompleteIPRotationRequest +from .types.cluster_service import CreateClusterRequest +from .types.cluster_service import CreateNodePoolRequest +from .types.cluster_service import DailyMaintenanceWindow +from .types.cluster_service import DatabaseEncryption +from .types.cluster_service import DeleteClusterRequest +from .types.cluster_service import DeleteNodePoolRequest +from .types.cluster_service import GetClusterRequest +from .types.cluster_service import GetNodePoolRequest +from .types.cluster_service import 
GetOperationRequest +from .types.cluster_service import GetServerConfigRequest +from .types.cluster_service import HorizontalPodAutoscaling +from .types.cluster_service import HttpLoadBalancing +from .types.cluster_service import IPAllocationPolicy +from .types.cluster_service import IntraNodeVisibilityConfig +from .types.cluster_service import KubernetesDashboard +from .types.cluster_service import LegacyAbac +from .types.cluster_service import ListClustersRequest +from .types.cluster_service import ListClustersResponse +from .types.cluster_service import ListNodePoolsRequest +from .types.cluster_service import ListNodePoolsResponse +from .types.cluster_service import ListOperationsRequest +from .types.cluster_service import ListOperationsResponse +from .types.cluster_service import ListUsableSubnetworksRequest +from .types.cluster_service import ListUsableSubnetworksResponse +from .types.cluster_service import MaintenancePolicy +from .types.cluster_service import MaintenanceWindow +from .types.cluster_service import MasterAuth +from .types.cluster_service import MasterAuthorizedNetworksConfig +from .types.cluster_service import MaxPodsConstraint +from .types.cluster_service import NetworkConfig +from .types.cluster_service import NetworkPolicy +from .types.cluster_service import NetworkPolicyConfig +from .types.cluster_service import NodeConfig +from .types.cluster_service import NodeManagement +from .types.cluster_service import NodePool +from .types.cluster_service import NodePoolAutoscaling +from .types.cluster_service import NodeTaint +from .types.cluster_service import Operation +from .types.cluster_service import PrivateClusterConfig +from .types.cluster_service import RecurringTimeWindow +from .types.cluster_service import ResourceLimit +from .types.cluster_service import ResourceUsageExportConfig +from .types.cluster_service import RollbackNodePoolUpgradeRequest +from .types.cluster_service import ServerConfig +from .types.cluster_service import 
SetAddonsConfigRequest +from .types.cluster_service import SetLabelsRequest +from .types.cluster_service import SetLegacyAbacRequest +from .types.cluster_service import SetLocationsRequest +from .types.cluster_service import SetLoggingServiceRequest +from .types.cluster_service import SetMaintenancePolicyRequest +from .types.cluster_service import SetMasterAuthRequest +from .types.cluster_service import SetMonitoringServiceRequest +from .types.cluster_service import SetNetworkPolicyRequest +from .types.cluster_service import SetNodePoolAutoscalingRequest +from .types.cluster_service import SetNodePoolManagementRequest +from .types.cluster_service import SetNodePoolSizeRequest +from .types.cluster_service import ShieldedInstanceConfig +from .types.cluster_service import StartIPRotationRequest +from .types.cluster_service import StatusCondition +from .types.cluster_service import TimeWindow +from .types.cluster_service import UpdateClusterRequest +from .types.cluster_service import UpdateMasterRequest +from .types.cluster_service import UpdateNodePoolRequest +from .types.cluster_service import UsableSubnetwork +from .types.cluster_service import UsableSubnetworkSecondaryRange +from .types.cluster_service import VerticalPodAutoscaling __all__ = ( - "enums", - "types", + "AcceleratorConfig", + "AddonsConfig", + "AuthenticatorGroupsConfig", + "AutoUpgradeOptions", + "AutoprovisioningNodePoolDefaults", + "BinaryAuthorization", + "CancelOperationRequest", + "ClientCertificateConfig", + "CloudRunConfig", + "Cluster", + "ClusterAutoscaling", + "ClusterUpdate", + "CompleteIPRotationRequest", + "CreateClusterRequest", + "CreateNodePoolRequest", + "DailyMaintenanceWindow", + "DatabaseEncryption", + "DeleteClusterRequest", + "DeleteNodePoolRequest", + "GetClusterRequest", + "GetNodePoolRequest", + "GetOperationRequest", + "GetServerConfigRequest", + "HorizontalPodAutoscaling", + "HttpLoadBalancing", + "IPAllocationPolicy", + "IntraNodeVisibilityConfig", + "KubernetesDashboard", 
+ "LegacyAbac", + "ListClustersRequest", + "ListClustersResponse", + "ListNodePoolsRequest", + "ListNodePoolsResponse", + "ListOperationsRequest", + "ListOperationsResponse", + "ListUsableSubnetworksRequest", + "ListUsableSubnetworksResponse", + "MaintenancePolicy", + "MaintenanceWindow", + "MasterAuth", + "MasterAuthorizedNetworksConfig", + "MaxPodsConstraint", + "NetworkConfig", + "NetworkPolicy", + "NetworkPolicyConfig", + "NodeConfig", + "NodeManagement", + "NodePool", + "NodePoolAutoscaling", + "NodeTaint", + "Operation", + "PrivateClusterConfig", + "RecurringTimeWindow", + "ResourceLimit", + "ResourceUsageExportConfig", + "RollbackNodePoolUpgradeRequest", + "ServerConfig", + "SetAddonsConfigRequest", + "SetLabelsRequest", + "SetLegacyAbacRequest", + "SetLocationsRequest", + "SetLoggingServiceRequest", + "SetMaintenancePolicyRequest", + "SetMasterAuthRequest", + "SetMonitoringServiceRequest", + "SetNetworkPolicyRequest", + "SetNodePoolAutoscalingRequest", + "SetNodePoolManagementRequest", + "SetNodePoolSizeRequest", + "ShieldedInstanceConfig", + "StartIPRotationRequest", + "StatusCondition", + "TimeWindow", + "UpdateClusterRequest", + "UpdateMasterRequest", + "UpdateNodePoolRequest", + "UsableSubnetwork", + "UsableSubnetworkSecondaryRange", + "VerticalPodAutoscaling", "ClusterManagerClient", ) diff --git a/google/cloud/container_v1/gapic/cluster_manager_client.py b/google/cloud/container_v1/gapic/cluster_manager_client.py deleted file mode 100644 index 7d6c88a4..00000000 --- a/google/cloud/container_v1/gapic/cluster_manager_client.py +++ /dev/null @@ -1,3066 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.container.v1 ClusterManager API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import grpc - -from google.cloud.container_v1.gapic import cluster_manager_client_config -from google.cloud.container_v1.gapic import enums -from google.cloud.container_v1.gapic.transports import cluster_manager_grpc_transport -from google.cloud.container_v1.proto import cluster_service_pb2 -from google.cloud.container_v1.proto import cluster_service_pb2_grpc -from google.protobuf import empty_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-container", -).version - - -class ClusterManagerClient(object): - """Google Kubernetes Engine Cluster Manager v1""" - - SERVICE_ADDRESS = "container.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.container.v1.ClusterManager" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. 
- - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterManagerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.ClusterManagerGrpcTransport, - Callable[[~.Credentials, type], ~.ClusterManagerGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. 
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = cluster_manager_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=cluster_manager_grpc_transport.ClusterManagerGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." 
- ) - self.transport = transport - else: - self.transport = cluster_manager_grpc_transport.ClusterManagerGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def delete_cluster( - self, - project_id=None, - zone=None, - cluster_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes the cluster, including the Kubernetes endpoint and all worker - nodes. - - Firewalls and routes that were configured during cluster creation - are also deleted. - - Other Google Compute Engine resources that might be in use by the cluster, - such as load balancer resources, are not deleted if they weren't present - when the cluster was initially created. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> response = client.delete_cluster() - - Args: - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. 
The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to delete. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster) of the cluster to delete. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_cluster, - default_retry=self._method_configs["DeleteCluster"].retry, - default_timeout=self._method_configs["DeleteCluster"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.DeleteClusterRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id, name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["delete_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_node_pool( - self, - project_id=None, - zone=None, - cluster_id=None, - node_pool_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a node pool from a cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> response = client.delete_node_pool() - - Args: - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. - This field has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster. - This field has been deprecated and replaced by the name field. - node_pool_id (str): Deprecated. The name of the node pool to delete. - This field has been deprecated and replaced by the name field. 
- name (str): The name (project, location, cluster, node pool id) of the node pool - to delete. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_node_pool" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_node_pool" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_node_pool, - default_retry=self._method_configs["DeleteNodePool"].retry, - default_timeout=self._method_configs["DeleteNodePool"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.DeleteNodePoolRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["delete_node_pool"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_clusters( - self, - project_id=None, - zone=None, - parent=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all clusters owned by a project in either the specified zone or all - zones. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> response = client.list_clusters() - - Args: - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the parent field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides, or "-" for all zones. This field has been - deprecated and replaced by the parent field. - parent (str): The parent (project and location) where the clusters will be listed. - Specified in the format ``projects/*/locations/*``. Location "-" matches - all zones and all regions. 
- retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.ListClustersResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_clusters" not in self._inner_api_calls: - self._inner_api_calls[ - "list_clusters" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_clusters, - default_retry=self._method_configs["ListClusters"].retry, - default_timeout=self._method_configs["ListClusters"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.ListClustersRequest( - project_id=project_id, zone=zone, parent=parent, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_clusters"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_cluster( - self, - project_id=None, - zone=None, - cluster_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the details of a specific 
cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> response = client.get_cluster() - - Args: - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to retrieve. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster) of the cluster to retrieve. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Cluster` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "get_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_cluster, - default_retry=self._method_configs["GetCluster"].retry, - default_timeout=self._method_configs["GetCluster"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.GetClusterRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id, name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_cluster( - self, - cluster, - project_id=None, - zone=None, - parent=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a cluster, consisting of the specified number and type of - Google Compute Engine instances. - - By default, the cluster is created in the project's `default - network `__. - - One firewall is added for the cluster. After cluster creation, the - Kubelet creates routes for each node to allow the containers on that - node to communicate with all other instances in the cluster. - - Finally, an entry is added to the project's global metadata indicating - which CIDR range the cluster is using. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `cluster`: - >>> cluster = {} - >>> - >>> response = client.create_cluster(cluster) - - Args: - cluster (Union[dict, ~google.cloud.container_v1.types.Cluster]): Required. 
A `cluster - resource `__ - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1.types.Cluster` - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the parent field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the parent field. - parent (str): The parent (project and location) where the cluster will be created. - Specified in the format ``projects/*/locations/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "create_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_cluster, - default_retry=self._method_configs["CreateCluster"].retry, - default_timeout=self._method_configs["CreateCluster"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.CreateClusterRequest( - cluster=cluster, project_id=project_id, zone=zone, parent=parent, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_cluster( - self, - update, - project_id=None, - zone=None, - cluster_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates the settings of a specific cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `update`: - >>> update = {} - >>> - >>> response = client.update_cluster(update) - - Args: - update (Union[dict, ~google.cloud.container_v1.types.ClusterUpdate]): Required. A description of the update. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1.types.ClusterUpdate` - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. 
- cluster_id (str): Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster) of the cluster to update. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "update_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_cluster, - default_retry=self._method_configs["UpdateCluster"].retry, - default_timeout=self._method_configs["UpdateCluster"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.UpdateClusterRequest( - update=update, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_node_pool( - self, - node_version, - image_type, - project_id=None, - zone=None, - cluster_id=None, - node_pool_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates the version and/or image type for the specified node pool. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `node_version`: - >>> node_version = '' - >>> - >>> # TODO: Initialize `image_type`: - >>> image_type = '' - >>> - >>> response = client.update_node_pool(node_version, image_type) - - Args: - node_version (str): Required. The Kubernetes version to change the nodes to (typically an - upgrade). 
- - Users may specify either explicit versions offered by Kubernetes Engine or - version aliases, which have the following behavior: - - - "latest": picks the highest valid Kubernetes version - - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - - "1.X.Y-gke.N": picks an explicit Kubernetes version - - "-": picks the Kubernetes master version - image_type (str): Required. The desired image type for the node pool. - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - node_pool_id (str): Deprecated. The name of the node pool to upgrade. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster, node pool) of the node pool to - update. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_node_pool" not in self._inner_api_calls: - self._inner_api_calls[ - "update_node_pool" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_node_pool, - default_retry=self._method_configs["UpdateNodePool"].retry, - default_timeout=self._method_configs["UpdateNodePool"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.UpdateNodePoolRequest( - node_version=node_version, - image_type=image_type, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_node_pool"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_node_pool_autoscaling( - self, - autoscaling, - project_id=None, - zone=None, - cluster_id=None, - node_pool_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the autoscaling settings for the specified node pool. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `autoscaling`: - >>> autoscaling = {} - >>> - >>> response = client.set_node_pool_autoscaling(autoscaling) - - Args: - autoscaling (Union[dict, ~google.cloud.container_v1.types.NodePoolAutoscaling]): Required. Autoscaling configuration for the node pool. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1.types.NodePoolAutoscaling` - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - node_pool_id (str): Deprecated. The name of the node pool to upgrade. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster, node pool) of the node pool to - set autoscaler settings. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_node_pool_autoscaling" not in self._inner_api_calls: - self._inner_api_calls[ - "set_node_pool_autoscaling" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_node_pool_autoscaling, - default_retry=self._method_configs["SetNodePoolAutoscaling"].retry, - default_timeout=self._method_configs["SetNodePoolAutoscaling"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetNodePoolAutoscalingRequest( - autoscaling=autoscaling, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_node_pool_autoscaling"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_logging_service( - self, - logging_service, - project_id=None, - zone=None, - cluster_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the logging service for a specific cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `logging_service`: - >>> logging_service = '' - >>> - >>> response = client.set_logging_service(logging_service) - - Args: - logging_service (str): Required. The logging service the cluster should use to write - metrics. Currently available options: - - - "logging.googleapis.com" - the Google Cloud Logging service - - "none" - no metrics will be exported from the cluster - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. 
- zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster) of the cluster to set logging. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_logging_service" not in self._inner_api_calls: - self._inner_api_calls[ - "set_logging_service" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_logging_service, - default_retry=self._method_configs["SetLoggingService"].retry, - default_timeout=self._method_configs["SetLoggingService"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetLoggingServiceRequest( - logging_service=logging_service, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_logging_service"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_monitoring_service( - self, - monitoring_service, - project_id=None, - zone=None, - cluster_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the monitoring service for a specific cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `monitoring_service`: - >>> monitoring_service = '' - >>> - >>> response = client.set_monitoring_service(monitoring_service) - - Args: - monitoring_service (str): Required. The monitoring service the cluster should use to write - metrics. Currently available options: - - - "monitoring.googleapis.com/kubernetes" - the Google Cloud Monitoring - service with Kubernetes-native resource model - - "monitoring.googleapis.com" - the Google Cloud Monitoring service - - "none" - no metrics will be exported from the cluster - project_id (str): Deprecated. 
The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster) of the cluster to set - monitoring. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_monitoring_service" not in self._inner_api_calls: - self._inner_api_calls[ - "set_monitoring_service" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_monitoring_service, - default_retry=self._method_configs["SetMonitoringService"].retry, - default_timeout=self._method_configs["SetMonitoringService"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetMonitoringServiceRequest( - monitoring_service=monitoring_service, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_monitoring_service"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_addons_config( - self, - addons_config, - project_id=None, - zone=None, - cluster_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the addons for a specific cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `addons_config`: - >>> addons_config = {} - >>> - >>> response = client.set_addons_config(addons_config) - - Args: - addons_config (Union[dict, ~google.cloud.container_v1.types.AddonsConfig]): Required. The desired configurations for the various addons available to run in the - cluster. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1.types.AddonsConfig` - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. 
- zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster) of the cluster to set addons. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_addons_config" not in self._inner_api_calls: - self._inner_api_calls[ - "set_addons_config" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_addons_config, - default_retry=self._method_configs["SetAddonsConfig"].retry, - default_timeout=self._method_configs["SetAddonsConfig"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetAddonsConfigRequest( - addons_config=addons_config, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_addons_config"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_locations( - self, - locations, - project_id=None, - zone=None, - cluster_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the locations for a specific cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `locations`: - >>> locations = [] - >>> - >>> response = client.set_locations(locations) - - Args: - locations (list[str]): Required. The desired list of Google Compute Engine - `zones `__ in - which the cluster's nodes should be located. Changing the locations a - cluster is in will result in nodes being either created or removed from - the cluster, depending on whether locations are being added or removed. - - This list must always include the cluster's primary zone. - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. 
- zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster) of the cluster to set - locations. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_locations" not in self._inner_api_calls: - self._inner_api_calls[ - "set_locations" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_locations, - default_retry=self._method_configs["SetLocations"].retry, - default_timeout=self._method_configs["SetLocations"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetLocationsRequest( - locations=locations, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_locations"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_master( - self, - master_version, - project_id=None, - zone=None, - cluster_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates the master for a specific cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `master_version`: - >>> master_version = '' - >>> - >>> response = client.update_master(master_version) - - Args: - master_version (str): Required. The Kubernetes version to change the master to. - - Users may specify either explicit versions offered by Kubernetes Engine or - version aliases, which have the following behavior: - - - "latest": picks the highest valid Kubernetes version - - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - - "1.X.Y-gke.N": picks an explicit Kubernetes version - - "-": picks the default Kubernetes version - project_id (str): Deprecated. 
The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster) of the cluster to update. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_master" not in self._inner_api_calls: - self._inner_api_calls[ - "update_master" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_master, - default_retry=self._method_configs["UpdateMaster"].retry, - default_timeout=self._method_configs["UpdateMaster"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.UpdateMasterRequest( - master_version=master_version, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_master"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_master_auth( - self, - action, - update, - project_id=None, - zone=None, - cluster_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets master auth materials. Currently supports changing the admin password - or a specific cluster, either via password generation or explicitly setting - the password. - - Example: - >>> from google.cloud import container_v1 - >>> from google.cloud.container_v1 import enums - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `action`: - >>> action = enums.SetMasterAuthRequest.Action.UNKNOWN - >>> - >>> # TODO: Initialize `update`: - >>> update = {} - >>> - >>> response = client.set_master_auth(action, update) - - Args: - action (~google.cloud.container_v1.types.Action): Required. The exact form of action to be taken on the master auth. - update (Union[dict, ~google.cloud.container_v1.types.MasterAuth]): Required. A description of the update. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1.types.MasterAuth` - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster) of the cluster to set auth. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_master_auth" not in self._inner_api_calls: - self._inner_api_calls[ - "set_master_auth" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_master_auth, - default_retry=self._method_configs["SetMasterAuth"].retry, - default_timeout=self._method_configs["SetMasterAuth"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetMasterAuthRequest( - action=action, - update=update, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_master_auth"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_operations( - self, - project_id=None, - zone=None, - parent=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all operations in a project in a specific zone or all zones. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> response = client.list_operations() - - Args: - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the parent field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ to - return operations for, or ``-`` for all zones. This field has been - deprecated and replaced by the parent field. - parent (str): The parent (project and location) where the operations will be - listed. Specified in the format ``projects/*/locations/*``. Location "-" - matches all zones and all regions. 
- retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.ListOperationsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_operations" not in self._inner_api_calls: - self._inner_api_calls[ - "list_operations" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_operations, - default_retry=self._method_configs["ListOperations"].retry, - default_timeout=self._method_configs["ListOperations"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.ListOperationsRequest( - project_id=project_id, zone=zone, parent=parent, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_operations"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_operation( - self, - project_id=None, - zone=None, - operation_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the specified 
operation. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> response = client.get_operation() - - Args: - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - operation_id (str): Deprecated. The server-assigned ``name`` of the operation. This - field has been deprecated and replaced by the name field. - name (str): The name (project, location, operation id) of the operation to get. - Specified in the format ``projects/*/locations/*/operations/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_operation" not in self._inner_api_calls: - self._inner_api_calls[ - "get_operation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_operation, - default_retry=self._method_configs["GetOperation"].retry, - default_timeout=self._method_configs["GetOperation"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.GetOperationRequest( - project_id=project_id, zone=zone, operation_id=operation_id, name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_operation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def cancel_operation( - self, - project_id=None, - zone=None, - operation_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Cancels the specified operation. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> client.cancel_operation() - - Args: - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the operation resides. This field has been deprecated and replaced - by the name field. - operation_id (str): Deprecated. The server-assigned ``name`` of the operation. This - field has been deprecated and replaced by the name field. - name (str): The name (project, location, operation id) of the operation to - cancel. Specified in the format ``projects/*/locations/*/operations/*``. 
- retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "cancel_operation" not in self._inner_api_calls: - self._inner_api_calls[ - "cancel_operation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.cancel_operation, - default_retry=self._method_configs["CancelOperation"].retry, - default_timeout=self._method_configs["CancelOperation"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.CancelOperationRequest( - project_id=project_id, zone=zone, operation_id=operation_id, name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["cancel_operation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_server_config( - self, - project_id=None, - zone=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns configuration info about the Google Kubernetes Engine service. 
- - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> response = client.get_server_config() - - Args: - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ to - return operations for. This field has been deprecated and replaced by - the name field. - name (str): The name (project and location) of the server config to get, - specified in the format ``projects/*/locations/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.ServerConfig` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_server_config" not in self._inner_api_calls: - self._inner_api_calls[ - "get_server_config" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_server_config, - default_retry=self._method_configs["GetServerConfig"].retry, - default_timeout=self._method_configs["GetServerConfig"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.GetServerConfigRequest( - project_id=project_id, zone=zone, name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_server_config"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_node_pools( - self, - project_id=None, - zone=None, - cluster_id=None, - parent=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists the node pools for a cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> response = client.list_node_pools() - - Args: - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. - This field has been deprecated and replaced by the parent field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the parent field. - cluster_id (str): Deprecated. The name of the cluster. - This field has been deprecated and replaced by the parent field. - parent (str): The parent (project, location, cluster id) where the node pools will - be listed. Specified in the format - ``projects/*/locations/*/clusters/*``. 
- retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.ListNodePoolsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_node_pools" not in self._inner_api_calls: - self._inner_api_calls[ - "list_node_pools" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_node_pools, - default_retry=self._method_configs["ListNodePools"].retry, - default_timeout=self._method_configs["ListNodePools"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.ListNodePoolsRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id, parent=parent, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_node_pools"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_node_pool( - self, - project_id=None, - zone=None, - cluster_id=None, - node_pool_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - 
metadata=None, - ): - """ - Retrieves the requested node pool. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> response = client.get_node_pool() - - Args: - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. - This field has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster. - This field has been deprecated and replaced by the name field. - node_pool_id (str): Deprecated. The name of the node pool. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster, node pool id) of the node pool - to get. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.NodePool` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_node_pool" not in self._inner_api_calls: - self._inner_api_calls[ - "get_node_pool" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_node_pool, - default_retry=self._method_configs["GetNodePool"].retry, - default_timeout=self._method_configs["GetNodePool"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.GetNodePoolRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_node_pool"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_node_pool( - self, - node_pool, - project_id=None, - zone=None, - cluster_id=None, - parent=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a node pool for a cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `node_pool`: - >>> node_pool = {} - >>> - >>> response = client.create_node_pool(node_pool) - - Args: - node_pool (Union[dict, ~google.cloud.container_v1.types.NodePool]): Required. The node pool to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1.types.NodePool` - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. - This field has been deprecated and replaced by the parent field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. 
This field has been deprecated and replaced - by the parent field. - cluster_id (str): Deprecated. The name of the cluster. - This field has been deprecated and replaced by the parent field. - parent (str): The parent (project, location, cluster id) where the node pool will - be created. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_node_pool" not in self._inner_api_calls: - self._inner_api_calls[ - "create_node_pool" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_node_pool, - default_retry=self._method_configs["CreateNodePool"].retry, - default_timeout=self._method_configs["CreateNodePool"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.CreateNodePoolRequest( - node_pool=node_pool, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - parent=parent, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_node_pool"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def rollback_node_pool_upgrade( - self, - project_id=None, - zone=None, - cluster_id=None, - node_pool_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Rolls back a previously Aborted or Failed NodePool upgrade. - This makes no changes if the last upgrade successfully completed. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> response = client.rollback_node_pool_upgrade() - - Args: - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to rollback. - This field has been deprecated and replaced by the name field. - node_pool_id (str): Deprecated. 
The name of the node pool to rollback. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster, node pool id) of the node poll - to rollback upgrade. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "rollback_node_pool_upgrade" not in self._inner_api_calls: - self._inner_api_calls[ - "rollback_node_pool_upgrade" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.rollback_node_pool_upgrade, - default_retry=self._method_configs["RollbackNodePoolUpgrade"].retry, - default_timeout=self._method_configs["RollbackNodePoolUpgrade"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.RollbackNodePoolUpgradeRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["rollback_node_pool_upgrade"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_node_pool_management( - self, - management, - project_id=None, - zone=None, - cluster_id=None, - node_pool_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the NodeManagement options for a node pool. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `management`: - >>> management = {} - >>> - >>> response = client.set_node_pool_management(management) - - Args: - management (Union[dict, ~google.cloud.container_v1.types.NodeManagement]): Required. NodeManagement configuration for the node pool. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1.types.NodeManagement` - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. 
This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to update. - This field has been deprecated and replaced by the name field. - node_pool_id (str): Deprecated. The name of the node pool to update. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster, node pool id) of the node pool - to set management properties. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_node_pool_management" not in self._inner_api_calls: - self._inner_api_calls[ - "set_node_pool_management" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_node_pool_management, - default_retry=self._method_configs["SetNodePoolManagement"].retry, - default_timeout=self._method_configs["SetNodePoolManagement"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetNodePoolManagementRequest( - management=management, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_node_pool_management"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_labels( - self, - resource_labels, - label_fingerprint, - project_id=None, - zone=None, - cluster_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets labels on a cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `resource_labels`: - >>> resource_labels = {} - >>> - >>> # TODO: Initialize `label_fingerprint`: - >>> label_fingerprint = '' - >>> - >>> response = client.set_labels(resource_labels, label_fingerprint) - - Args: - resource_labels (dict[str -> str]): Required. The labels to set for that cluster. - label_fingerprint (str): Required. The fingerprint of the previous set of labels for this resource, - used to detect conflicts. The fingerprint is initially generated by - Kubernetes Engine and changes after every request to modify or update - labels. 
You must always provide an up-to-date fingerprint hash when - updating or changing labels. Make a get() request to the - resource to get the latest fingerprint. - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. - This field has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster id) of the cluster to set - labels. Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_labels" not in self._inner_api_calls: - self._inner_api_calls[ - "set_labels" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_labels, - default_retry=self._method_configs["SetLabels"].retry, - default_timeout=self._method_configs["SetLabels"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetLabelsRequest( - resource_labels=resource_labels, - label_fingerprint=label_fingerprint, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_labels"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_legacy_abac( - self, - enabled, - project_id=None, - zone=None, - cluster_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Enables or disables the ABAC authorization mechanism on a cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `enabled`: - >>> enabled = False - >>> - >>> response = client.set_legacy_abac(enabled) - - Args: - enabled (bool): Required. Whether ABAC authorization will be enabled in the cluster. - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to update. 
- This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster id) of the cluster to set - legacy abac. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_legacy_abac" not in self._inner_api_calls: - self._inner_api_calls[ - "set_legacy_abac" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_legacy_abac, - default_retry=self._method_configs["SetLegacyAbac"].retry, - default_timeout=self._method_configs["SetLegacyAbac"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetLegacyAbacRequest( - enabled=enabled, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_legacy_abac"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def start_i_p_rotation( - self, - project_id=None, - zone=None, - cluster_id=None, - name=None, - rotate_credentials=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Starts master IP rotation. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> response = client.start_i_p_rotation() - - Args: - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. - This field has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster id) of the cluster to start IP - rotation. Specified in the format ``projects/*/locations/*/clusters/*``. 
- rotate_credentials (bool): Whether to rotate credentials during IP rotation. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "start_i_p_rotation" not in self._inner_api_calls: - self._inner_api_calls[ - "start_i_p_rotation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.start_i_p_rotation, - default_retry=self._method_configs["StartIPRotation"].retry, - default_timeout=self._method_configs["StartIPRotation"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.StartIPRotationRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - name=name, - rotate_credentials=rotate_credentials, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["start_i_p_rotation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def complete_i_p_rotation( - self, - project_id=None, - zone=None, - cluster_id=None, - name=None, 
- retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Completes master IP rotation. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> response = client.complete_i_p_rotation() - - Args: - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. - This field has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster id) of the cluster to complete - IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "complete_i_p_rotation" not in self._inner_api_calls: - self._inner_api_calls[ - "complete_i_p_rotation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.complete_i_p_rotation, - default_retry=self._method_configs["CompleteIPRotation"].retry, - default_timeout=self._method_configs["CompleteIPRotation"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.CompleteIPRotationRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id, name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["complete_i_p_rotation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_node_pool_size( - self, - node_count, - project_id=None, - zone=None, - cluster_id=None, - node_pool_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the size for a specific node pool. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `node_count`: - >>> node_count = 0 - >>> - >>> response = client.set_node_pool_size(node_count) - - Args: - node_count (int): Required. The desired node count for the pool. - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. This field - has been deprecated and replaced by the name field. - zone (str): Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster to update. 
- This field has been deprecated and replaced by the name field. - node_pool_id (str): Deprecated. The name of the node pool to update. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster, node pool id) of the node pool - to set size. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_node_pool_size" not in self._inner_api_calls: - self._inner_api_calls[ - "set_node_pool_size" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_node_pool_size, - default_retry=self._method_configs["SetNodePoolSize"].retry, - default_timeout=self._method_configs["SetNodePoolSize"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetNodePoolSizeRequest( - node_count=node_count, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_node_pool_size"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_network_policy( - self, - network_policy, - project_id=None, - zone=None, - cluster_id=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Enables or disables Network Policy for a cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `network_policy`: - >>> network_policy = {} - >>> - >>> response = client.set_network_policy(network_policy) - - Args: - network_policy (Union[dict, ~google.cloud.container_v1.types.NetworkPolicy]): Required. Configuration options for the NetworkPolicy feature. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1.types.NetworkPolicy` - project_id (str): Deprecated. The Google Developers Console `project ID or project - number `__. - This field has been deprecated and replaced by the name field. - zone (str): Deprecated. 
The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Deprecated. The name of the cluster. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster id) of the cluster to set - networking policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_network_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_network_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_network_policy, - default_retry=self._method_configs["SetNetworkPolicy"].retry, - default_timeout=self._method_configs["SetNetworkPolicy"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetNetworkPolicyRequest( - network_policy=network_policy, - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_network_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_maintenance_policy( - self, - project_id, - zone, - cluster_id, - maintenance_policy, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the maintenance policy for a cluster. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `maintenance_policy`: - >>> maintenance_policy = {} - >>> - >>> response = client.set_maintenance_policy(project_id, zone, cluster_id, maintenance_policy) - - Args: - project_id (str): Required. The Google Developers Console `project ID or project - number `__. - zone (str): Required. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. - cluster_id (str): Required. The name of the cluster to update. 
- maintenance_policy (Union[dict, ~google.cloud.container_v1.types.MaintenancePolicy]): Required. The maintenance policy to be set for the cluster. An empty field - clears the existing maintenance policy. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1.types.MaintenancePolicy` - name (str): The name (project, location, cluster id) of the cluster to set - maintenance policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_maintenance_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_maintenance_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_maintenance_policy, - default_retry=self._method_configs["SetMaintenancePolicy"].retry, - default_timeout=self._method_configs["SetMaintenancePolicy"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetMaintenancePolicyRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - maintenance_policy=maintenance_policy, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_maintenance_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_usable_subnetworks( - self, - parent=None, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists subnetworks that are usable for creating clusters in a project. - - Example: - >>> from google.cloud import container_v1 - >>> - >>> client = container_v1.ClusterManagerClient() - >>> - >>> # Iterate over all results - >>> for element in client.list_usable_subnetworks(): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_usable_subnetworks().pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): The parent project where subnetworks are usable. Specified in the - format ``projects/*``. 
- filter_ (str): Filtering currently only supports equality on the networkProjectId - and must be in the form: "networkProjectId=[PROJECTID]", where - ``networkProjectId`` is the project which owns the listed subnetworks. - This defaults to the parent project ID. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.container_v1.types.UsableSubnetwork` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_usable_subnetworks" not in self._inner_api_calls: - self._inner_api_calls[ - "list_usable_subnetworks" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_usable_subnetworks, - default_retry=self._method_configs["ListUsableSubnetworks"].retry, - default_timeout=self._method_configs["ListUsableSubnetworks"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.ListUsableSubnetworksRequest( - parent=parent, filter=filter_, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_usable_subnetworks"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="subnetworks", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator diff --git a/google/cloud/container_v1/gapic/cluster_manager_client_config.py b/google/cloud/container_v1/gapic/cluster_manager_client_config.py deleted file mode 100644 index bee6ad66..00000000 --- a/google/cloud/container_v1/gapic/cluster_manager_client_config.py +++ /dev/null @@ -1,178 +0,0 @@ -config = { - "interfaces": { - "google.container.v1.ClusterManager": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "DeleteCluster": { - "timeout_millis": 60000, - 
"retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "DeleteNodePool": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListClusters": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetCluster": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateCluster": { - "timeout_millis": 45000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateNodePool": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetNodePoolAutoscaling": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetLoggingService": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetMonitoringService": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetAddonsConfig": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetLocations": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateMaster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetMasterAuth": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "ListOperations": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetOperation": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CancelOperation": { 
- "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetServerConfig": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListNodePools": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetNodePool": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateNodePool": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "RollbackNodePoolUpgrade": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetNodePoolManagement": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetLabels": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetLegacyAbac": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "StartIPRotation": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "CompleteIPRotation": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetNodePoolSize": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetNetworkPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetMaintenancePolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "ListUsableSubnetworks": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/google/cloud/container_v1/gapic/enums.py 
b/google/cloud/container_v1/gapic/enums.py deleted file mode 100644 index d2115633..00000000 --- a/google/cloud/container_v1/gapic/enums.py +++ /dev/null @@ -1,261 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class Cluster(object): - class Status(enum.IntEnum): - """ - The current status of the cluster. - - Attributes: - STATUS_UNSPECIFIED (int): Not set. - PROVISIONING (int): The PROVISIONING state indicates the cluster is being created. - RUNNING (int): The RUNNING state indicates the cluster has been created and is fully - usable. - RECONCILING (int): The RECONCILING state indicates that some work is actively being - done on the cluster, such as upgrading the master or node software. - Details can be found in the ``statusMessage`` field. - STOPPING (int): The STOPPING state indicates the cluster is being deleted. - ERROR (int): The ERROR state indicates the cluster may be unusable. Details can - be found in the ``statusMessage`` field. - DEGRADED (int): The DEGRADED state indicates the cluster requires user action to - restore full functionality. Details can be found in the - ``statusMessage`` field. - """ - - STATUS_UNSPECIFIED = 0 - PROVISIONING = 1 - RUNNING = 2 - RECONCILING = 3 - STOPPING = 4 - ERROR = 5 - DEGRADED = 6 - - -class DatabaseEncryption(object): - class State(enum.IntEnum): - """ - State of etcd encryption. 
- - Attributes: - UNKNOWN (int): Should never be set - ENCRYPTED (int): Secrets in etcd are encrypted. - DECRYPTED (int): Secrets in etcd are stored in plain text (at etcd level) - this is - unrelated to GCE level full disk encryption. - """ - - UNKNOWN = 0 - ENCRYPTED = 1 - DECRYPTED = 2 - - -class NetworkPolicy(object): - class Provider(enum.IntEnum): - """ - Allowed Network Policy providers. - - Attributes: - PROVIDER_UNSPECIFIED (int): Not set - CALICO (int): Tigera (Calico Felix). - """ - - PROVIDER_UNSPECIFIED = 0 - CALICO = 1 - - -class NodePool(object): - class Status(enum.IntEnum): - """ - The current status of the node pool instance. - - Attributes: - STATUS_UNSPECIFIED (int): Not set. - PROVISIONING (int): The PROVISIONING state indicates the node pool is being created. - RUNNING (int): The RUNNING state indicates the node pool has been created - and is fully usable. - RUNNING_WITH_ERROR (int): The RUNNING_WITH_ERROR state indicates the node pool has been - created and is partially usable. Some error state has occurred and some - functionality may be impaired. Customer may need to reissue a request or - trigger a new update. - RECONCILING (int): The RECONCILING state indicates that some work is actively being - done on the node pool, such as upgrading node software. Details can be - found in the ``statusMessage`` field. - STOPPING (int): The STOPPING state indicates the node pool is being deleted. - ERROR (int): The ERROR state indicates the node pool may be unusable. Details can - be found in the ``statusMessage`` field. - """ - - STATUS_UNSPECIFIED = 0 - PROVISIONING = 1 - RUNNING = 2 - RUNNING_WITH_ERROR = 3 - RECONCILING = 4 - STOPPING = 5 - ERROR = 6 - - -class NodeTaint(object): - class Effect(enum.IntEnum): - """ - Possible values for Effect in taint. 
- - Attributes: - EFFECT_UNSPECIFIED (int): Not set - NO_SCHEDULE (int): NoSchedule - PREFER_NO_SCHEDULE (int): PreferNoSchedule - NO_EXECUTE (int): NoExecute - """ - - EFFECT_UNSPECIFIED = 0 - NO_SCHEDULE = 1 - PREFER_NO_SCHEDULE = 2 - NO_EXECUTE = 3 - - -class Operation(object): - class Status(enum.IntEnum): - """ - Current status of the operation. - - Attributes: - STATUS_UNSPECIFIED (int): Not set. - PENDING (int): The operation has been created. - RUNNING (int): The operation is currently running. - DONE (int): The operation is done, either cancelled or completed. - ABORTING (int): The operation is aborting. - """ - - STATUS_UNSPECIFIED = 0 - PENDING = 1 - RUNNING = 2 - DONE = 3 - ABORTING = 4 - - class Type(enum.IntEnum): - """ - Operation type. - - Attributes: - TYPE_UNSPECIFIED (int): Not set. - CREATE_CLUSTER (int): Cluster create. - DELETE_CLUSTER (int): Cluster delete. - UPGRADE_MASTER (int): A master upgrade. - UPGRADE_NODES (int): A node upgrade. - REPAIR_CLUSTER (int): Cluster repair. - UPDATE_CLUSTER (int): Cluster update. - CREATE_NODE_POOL (int): Node pool create. - DELETE_NODE_POOL (int): Node pool delete. - SET_NODE_POOL_MANAGEMENT (int): Set node pool management. - AUTO_REPAIR_NODES (int): Automatic node pool repair. - AUTO_UPGRADE_NODES (int): Automatic node upgrade. - SET_LABELS (int): Set labels. - SET_MASTER_AUTH (int): Set/generate master auth materials - SET_NODE_POOL_SIZE (int): Set node pool size. - SET_NETWORK_POLICY (int): Updates network policy for a cluster. - SET_MAINTENANCE_POLICY (int): Set the maintenance policy. 
- """ - - TYPE_UNSPECIFIED = 0 - CREATE_CLUSTER = 1 - DELETE_CLUSTER = 2 - UPGRADE_MASTER = 3 - UPGRADE_NODES = 4 - REPAIR_CLUSTER = 5 - UPDATE_CLUSTER = 6 - CREATE_NODE_POOL = 7 - DELETE_NODE_POOL = 8 - SET_NODE_POOL_MANAGEMENT = 9 - AUTO_REPAIR_NODES = 10 - AUTO_UPGRADE_NODES = 11 - SET_LABELS = 12 - SET_MASTER_AUTH = 13 - SET_NODE_POOL_SIZE = 14 - SET_NETWORK_POLICY = 15 - SET_MAINTENANCE_POLICY = 16 - - -class SetMasterAuthRequest(object): - class Action(enum.IntEnum): - """ - Operation type: what type update to perform. - - Attributes: - UNKNOWN (int): Operation is unknown and will error out. - SET_PASSWORD (int): Set the password to a user generated value. - GENERATE_PASSWORD (int): Generate a new password and set it to that. - SET_USERNAME (int): Set the username. If an empty username is provided, basic authentication - is disabled for the cluster. If a non-empty username is provided, basic - authentication is enabled, with either a provided password or a generated - one. - """ - - UNKNOWN = 0 - SET_PASSWORD = 1 - GENERATE_PASSWORD = 2 - SET_USERNAME = 3 - - -class StatusCondition(object): - class Code(enum.IntEnum): - """ - Code for each condition - - Attributes: - UNKNOWN (int): UNKNOWN indicates a generic condition. - GCE_STOCKOUT (int): GCE_STOCKOUT indicates a Google Compute Engine stockout. - GKE_SERVICE_ACCOUNT_DELETED (int): GKE_SERVICE_ACCOUNT_DELETED indicates that the user deleted their - robot service account. - GCE_QUOTA_EXCEEDED (int): Google Compute Engine quota was exceeded. - SET_BY_OPERATOR (int): Cluster state was manually changed by an SRE due to a system logic error. - CLOUD_KMS_KEY_ERROR (int): Unable to perform an encrypt operation against the CloudKMS key used for - etcd level encryption. 
- More codes TBA - """ - - UNKNOWN = 0 - GCE_STOCKOUT = 1 - GKE_SERVICE_ACCOUNT_DELETED = 2 - GCE_QUOTA_EXCEEDED = 3 - SET_BY_OPERATOR = 4 - CLOUD_KMS_KEY_ERROR = 7 - - -class UsableSubnetworkSecondaryRange(object): - class Status(enum.IntEnum): - """ - Status shows the current usage of a secondary IP range. - - Attributes: - UNKNOWN (int): UNKNOWN is the zero value of the Status enum. It's not a valid status. - UNUSED (int): UNUSED denotes that this range is unclaimed by any cluster. - IN_USE_SERVICE (int): IN_USE_SERVICE denotes that this range is claimed by a cluster for - services. It cannot be used for other clusters. - IN_USE_SHAREABLE_POD (int): IN_USE_SHAREABLE_POD denotes this range was created by the network - admin and is currently claimed by a cluster for pods. It can only be - used by other clusters as a pod range. - IN_USE_MANAGED_POD (int): IN_USE_MANAGED_POD denotes this range was created by GKE and is - claimed for pods. It cannot be used for other clusters. - """ - - UNKNOWN = 0 - UNUSED = 1 - IN_USE_SERVICE = 2 - IN_USE_SHAREABLE_POD = 3 - IN_USE_MANAGED_POD = 4 diff --git a/google/cloud/container_v1/gapic/transports/cluster_manager_grpc_transport.py b/google/cloud/container_v1/gapic/transports/cluster_manager_grpc_transport.py deleted file mode 100644 index 37c93439..00000000 --- a/google/cloud/container_v1/gapic/transports/cluster_manager_grpc_transport.py +++ /dev/null @@ -1,536 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.container_v1.proto import cluster_service_pb2_grpc - - -class ClusterManagerGrpcTransport(object): - """gRPC transport class providing stubs for - google.container.v1 ClusterManager API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="container.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. 
- self._stubs = { - "cluster_manager_stub": cluster_service_pb2_grpc.ClusterManagerStub( - channel - ), - } - - @classmethod - def create_channel( - cls, address="container.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def delete_cluster(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.delete_cluster`. - - Deletes the cluster, including the Kubernetes endpoint and all worker - nodes. - - Firewalls and routes that were configured during cluster creation - are also deleted. - - Other Google Compute Engine resources that might be in use by the cluster, - such as load balancer resources, are not deleted if they weren't present - when the cluster was initially created. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].DeleteCluster - - @property - def delete_node_pool(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.delete_node_pool`. - - Deletes a node pool from a cluster. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].DeleteNodePool - - @property - def list_clusters(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.list_clusters`. - - Lists all clusters owned by a project in either the specified zone or all - zones. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].ListClusters - - @property - def get_cluster(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.get_cluster`. - - Gets the details of a specific cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].GetCluster - - @property - def create_cluster(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.create_cluster`. - - Creates a cluster, consisting of the specified number and type of - Google Compute Engine instances. - - By default, the cluster is created in the project's `default - network `__. - - One firewall is added for the cluster. After cluster creation, the - Kubelet creates routes for each node to allow the containers on that - node to communicate with all other instances in the cluster. - - Finally, an entry is added to the project's global metadata indicating - which CIDR range the cluster is using. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].CreateCluster - - @property - def update_cluster(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.update_cluster`. - - Updates the settings of a specific cluster. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].UpdateCluster - - @property - def update_node_pool(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.update_node_pool`. - - Updates the version and/or image type for the specified node pool. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].UpdateNodePool - - @property - def set_node_pool_autoscaling(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_node_pool_autoscaling`. - - Sets the autoscaling settings for the specified node pool. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetNodePoolAutoscaling - - @property - def set_logging_service(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_logging_service`. - - Sets the logging service for a specific cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetLoggingService - - @property - def set_monitoring_service(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_monitoring_service`. - - Sets the monitoring service for a specific cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetMonitoringService - - @property - def set_addons_config(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_addons_config`. - - Sets the addons for a specific cluster. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetAddonsConfig - - @property - def set_locations(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_locations`. - - Sets the locations for a specific cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetLocations - - @property - def update_master(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.update_master`. - - Updates the master for a specific cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].UpdateMaster - - @property - def set_master_auth(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_master_auth`. - - Sets master auth materials. Currently supports changing the admin password - or a specific cluster, either via password generation or explicitly setting - the password. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetMasterAuth - - @property - def list_operations(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.list_operations`. - - Lists all operations in a project in a specific zone or all zones. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].ListOperations - - @property - def get_operation(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.get_operation`. - - Gets the specified operation. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].GetOperation - - @property - def cancel_operation(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.cancel_operation`. - - Cancels the specified operation. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].CancelOperation - - @property - def get_server_config(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.get_server_config`. - - Returns configuration info about the Google Kubernetes Engine service. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].GetServerConfig - - @property - def list_node_pools(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.list_node_pools`. - - Lists the node pools for a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].ListNodePools - - @property - def get_node_pool(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.get_node_pool`. - - Retrieves the requested node pool. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].GetNodePool - - @property - def create_node_pool(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.create_node_pool`. - - Creates a node pool for a cluster. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].CreateNodePool - - @property - def rollback_node_pool_upgrade(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.rollback_node_pool_upgrade`. - - Rolls back a previously Aborted or Failed NodePool upgrade. - This makes no changes if the last upgrade successfully completed. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].RollbackNodePoolUpgrade - - @property - def set_node_pool_management(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_node_pool_management`. - - Sets the NodeManagement options for a node pool. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetNodePoolManagement - - @property - def set_labels(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_labels`. - - Sets labels on a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetLabels - - @property - def set_legacy_abac(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_legacy_abac`. - - Enables or disables the ABAC authorization mechanism on a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetLegacyAbac - - @property - def start_i_p_rotation(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.start_i_p_rotation`. - - Starts master IP rotation. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].StartIPRotation - - @property - def complete_i_p_rotation(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.complete_i_p_rotation`. - - Completes master IP rotation. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].CompleteIPRotation - - @property - def set_node_pool_size(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_node_pool_size`. - - Sets the size for a specific node pool. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetNodePoolSize - - @property - def set_network_policy(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_network_policy`. - - Enables or disables Network Policy for a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetNetworkPolicy - - @property - def set_maintenance_policy(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_maintenance_policy`. - - Sets the maintenance policy for a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetMaintenancePolicy - - @property - def list_usable_subnetworks(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.list_usable_subnetworks`. - - Lists subnetworks that are usable for creating clusters in a project. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].ListUsableSubnetworks diff --git a/google/cloud/container_v1/proto/cluster_service_pb2.py b/google/cloud/container_v1/proto/cluster_service_pb2.py deleted file mode 100644 index e110e056..00000000 --- a/google/cloud/container_v1/proto/cluster_service_pb2.py +++ /dev/null @@ -1,13346 +0,0 @@ -# -*- coding: utf-8 -*- -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/container_v1/proto/cluster_service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/container_v1/proto/cluster_service.proto", - package="google.container.v1", - syntax="proto3", - serialized_options=b"\n\027com.google.container.v1B\023ClusterServiceProtoP\001Z\n\x16\x44\x61ilyMaintenanceWindow\x12\x12\n\nstart_time\x18\x02 \x01(\t\x12\x10\n\x08\x64uration\x18\x03 \x01(\t"\xc6\x01\n\x1cSetNodePoolManagementRequest\x12\x16\n\nproject_id\x18\x01 \x01(\tB\x02\x18\x01\x12\x10\n\x04zone\x18\x02 \x01(\tB\x02\x18\x01\x12\x16\n\ncluster_id\x18\x03 \x01(\tB\x02\x18\x01\x12\x18\n\x0cnode_pool_id\x18\x04 \x01(\tB\x02\x18\x01\x12<\n\nmanagement\x18\x05 
\x01(\x0b\x32#.google.container.v1.NodeManagementB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x07 \x01(\t"\x9b\x01\n\x16SetNodePoolSizeRequest\x12\x16\n\nproject_id\x18\x01 \x01(\tB\x02\x18\x01\x12\x10\n\x04zone\x18\x02 \x01(\tB\x02\x18\x01\x12\x16\n\ncluster_id\x18\x03 \x01(\tB\x02\x18\x01\x12\x18\n\x0cnode_pool_id\x18\x04 \x01(\tB\x02\x18\x01\x12\x17\n\nnode_count\x18\x05 \x01(\x05\x42\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x07 \x01(\t"\x8a\x01\n\x1eRollbackNodePoolUpgradeRequest\x12\x16\n\nproject_id\x18\x01 \x01(\tB\x02\x18\x01\x12\x10\n\x04zone\x18\x02 \x01(\tB\x02\x18\x01\x12\x16\n\ncluster_id\x18\x03 \x01(\tB\x02\x18\x01\x12\x18\n\x0cnode_pool_id\x18\x04 \x01(\tB\x02\x18\x01\x12\x0c\n\x04name\x18\x06 \x01(\t"J\n\x15ListNodePoolsResponse\x12\x31\n\nnode_pools\x18\x01 \x03(\x0b\x32\x1d.google.container.v1.NodePool"\xff\x01\n\x12\x43lusterAutoscaling\x12$\n\x1c\x65nable_node_autoprovisioning\x18\x01 \x01(\x08\x12;\n\x0fresource_limits\x18\x02 \x03(\x0b\x32".google.container.v1.ResourceLimit\x12\x62\n#autoprovisioning_node_pool_defaults\x18\x04 \x01(\x0b\x32\x35.google.container.v1.AutoprovisioningNodePoolDefaults\x12"\n\x1a\x61utoprovisioning_locations\x18\x05 \x03(\t"Q\n AutoprovisioningNodePoolDefaults\x12\x14\n\x0coauth_scopes\x18\x01 \x03(\t\x12\x17\n\x0fservice_account\x18\x02 \x01(\t"H\n\rResourceLimit\x12\x15\n\rresource_type\x18\x01 \x01(\t\x12\x0f\n\x07minimum\x18\x02 \x01(\x03\x12\x0f\n\x07maximum\x18\x03 \x01(\x03"o\n\x13NodePoolAutoscaling\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12\x16\n\x0emin_node_count\x18\x02 \x01(\x05\x12\x16\n\x0emax_node_count\x18\x03 \x01(\x05\x12\x17\n\x0f\x61utoprovisioned\x18\x04 \x01(\x08"\x92\x02\n\x10SetLabelsRequest\x12\x16\n\nproject_id\x18\x01 \x01(\tB\x02\x18\x01\x12\x10\n\x04zone\x18\x02 \x01(\tB\x02\x18\x01\x12\x16\n\ncluster_id\x18\x03 \x01(\tB\x02\x18\x01\x12W\n\x0fresource_labels\x18\x04 
\x03(\x0b\x32\x39.google.container.v1.SetLabelsRequest.ResourceLabelsEntryB\x03\xe0\x41\x02\x12\x1e\n\x11label_fingerprint\x18\x05 \x01(\tB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x07 \x01(\t\x1a\x35\n\x13ResourceLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"|\n\x14SetLegacyAbacRequest\x12\x16\n\nproject_id\x18\x01 \x01(\tB\x02\x18\x01\x12\x10\n\x04zone\x18\x02 \x01(\tB\x02\x18\x01\x12\x16\n\ncluster_id\x18\x03 \x01(\tB\x02\x18\x01\x12\x14\n\x07\x65nabled\x18\x04 \x01(\x08\x42\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x06 \x01(\t"\x84\x01\n\x16StartIPRotationRequest\x12\x16\n\nproject_id\x18\x01 \x01(\tB\x02\x18\x01\x12\x10\n\x04zone\x18\x02 \x01(\tB\x02\x18\x01\x12\x16\n\ncluster_id\x18\x03 \x01(\tB\x02\x18\x01\x12\x0c\n\x04name\x18\x06 \x01(\t\x12\x1a\n\x12rotate_credentials\x18\x07 \x01(\x08"k\n\x19\x43ompleteIPRotationRequest\x12\x16\n\nproject_id\x18\x01 \x01(\tB\x02\x18\x01\x12\x10\n\x04zone\x18\x02 \x01(\tB\x02\x18\x01\x12\x16\n\ncluster_id\x18\x03 \x01(\tB\x02\x18\x01\x12\x0c\n\x04name\x18\x07 \x01(\t"H\n\x11\x41\x63\x63\x65leratorConfig\x12\x19\n\x11\x61\x63\x63\x65lerator_count\x18\x01 \x01(\x03\x12\x18\n\x10\x61\x63\x63\x65lerator_type\x18\x02 \x01(\t"\xaa\x01\n\x17SetNetworkPolicyRequest\x12\x16\n\nproject_id\x18\x01 \x01(\tB\x02\x18\x01\x12\x10\n\x04zone\x18\x02 \x01(\tB\x02\x18\x01\x12\x16\n\ncluster_id\x18\x03 \x01(\tB\x02\x18\x01\x12?\n\x0enetwork_policy\x18\x04 \x01(\x0b\x32".google.container.v1.NetworkPolicyB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x06 \x01(\t"\xb9\x01\n\x1bSetMaintenancePolicyRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\x04zone\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x17\n\ncluster_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12G\n\x12maintenance_policy\x18\x04 \x01(\x0b\x32&.google.container.v1.MaintenancePolicyB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x05 \x01(\t"\xea\x01\n\x0fStatusCondition\x12\x37\n\x04\x63ode\x18\x01 
\x01(\x0e\x32).google.container.v1.StatusCondition.Code\x12\x0f\n\x07message\x18\x02 \x01(\t"\x8c\x01\n\x04\x43ode\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x10\n\x0cGCE_STOCKOUT\x10\x01\x12\x1f\n\x1bGKE_SERVICE_ACCOUNT_DELETED\x10\x02\x12\x16\n\x12GCE_QUOTA_EXCEEDED\x10\x03\x12\x13\n\x0fSET_BY_OPERATOR\x10\x04\x12\x17\n\x13\x43LOUD_KMS_KEY_ERROR\x10\x07"Z\n\rNetworkConfig\x12\x0f\n\x07network\x18\x01 \x01(\t\x12\x12\n\nsubnetwork\x18\x02 \x01(\t\x12$\n\x1c\x65nable_intra_node_visibility\x18\x05 \x01(\x08",\n\x19IntraNodeVisibilityConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08".\n\x11MaxPodsConstraint\x12\x19\n\x11max_pods_per_node\x18\x01 \x01(\x03"\x98\x01\n\x12\x44\x61tabaseEncryption\x12<\n\x05state\x18\x02 \x01(\x0e\x32-.google.container.v1.DatabaseEncryption.State\x12\x10\n\x08key_name\x18\x01 \x01(\t"2\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tENCRYPTED\x10\x01\x12\r\n\tDECRYPTED\x10\x02"e\n\x1cListUsableSubnetworksRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t"t\n\x1dListUsableSubnetworksResponse\x12:\n\x0bsubnetworks\x18\x01 \x03(\x0b\x32%.google.container.v1.UsableSubnetwork\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x80\x02\n\x1eUsableSubnetworkSecondaryRange\x12\x12\n\nrange_name\x18\x01 \x01(\t\x12\x15\n\rip_cidr_range\x18\x02 \x01(\t\x12J\n\x06status\x18\x03 \x01(\x0e\x32:.google.container.v1.UsableSubnetworkSecondaryRange.Status"g\n\x06Status\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06UNUSED\x10\x01\x12\x12\n\x0eIN_USE_SERVICE\x10\x02\x12\x18\n\x14IN_USE_SHAREABLE_POD\x10\x03\x12\x16\n\x12IN_USE_MANAGED_POD\x10\x04"\xb8\x01\n\x10UsableSubnetwork\x12\x12\n\nsubnetwork\x18\x01 \x01(\t\x12\x0f\n\x07network\x18\x02 \x01(\t\x12\x15\n\rip_cidr_range\x18\x03 \x01(\t\x12P\n\x13secondary_ip_ranges\x18\x04 \x03(\x0b\x32\x33.google.container.v1.UsableSubnetworkSecondaryRange\x12\x16\n\x0estatus_message\x18\x05 
\x01(\t"\xed\x02\n\x19ResourceUsageExportConfig\x12`\n\x14\x62igquery_destination\x18\x01 \x01(\x0b\x32\x42.google.container.v1.ResourceUsageExportConfig.BigQueryDestination\x12&\n\x1e\x65nable_network_egress_metering\x18\x02 \x01(\x08\x12m\n\x1b\x63onsumption_metering_config\x18\x03 \x01(\x0b\x32H.google.container.v1.ResourceUsageExportConfig.ConsumptionMeteringConfig\x1a)\n\x13\x42igQueryDestination\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x1a,\n\x19\x43onsumptionMeteringConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08")\n\x16VerticalPodAutoscaling\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x32\xd2\x44\n\x0e\x43lusterManager\x12\xe8\x01\n\x0cListClusters\x12(.google.container.v1.ListClustersRequest\x1a).google.container.v1.ListClustersResponse"\x82\x01\x82\xd3\xe4\x93\x02\x61\x12,/v1/{parent=projects/*/locations/*}/clustersZ1\x12//v1/projects/{project_id}/zones/{zone}/clusters\xda\x41\x0fproject_id,zone\xda\x41\x06parent\x12\xed\x01\n\nGetCluster\x12&.google.container.v1.GetClusterRequest\x1a\x1c.google.container.v1.Cluster"\x98\x01\x82\xd3\xe4\x93\x02n\x12,/v1/{name=projects/*/locations/*/clusters/*}Z>\x12*/v1/{name=projects/*/locations/*/clusters/*}:setResourceLabels:\x01*ZP"K/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/resourceLabels:\x01*\x12\xa5\x02\n\rSetLegacyAbac\x12).google.container.v1.SetLegacyAbacRequest\x1a\x1e.google.container.v1.Operation"\xc8\x01\x82\xd3\xe4\x93\x02\x8d\x01":/v1/{name=projects/*/locations/*/clusters/*}:setLegacyAbac:\x01*ZL"G/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/legacyAbac:\x01*\xda\x41"project_id,zone,cluster_id,enabled\xda\x41\x0cname,enabled\x12\xa0\x02\n\x0fStartIPRotation\x12+.google.container.v1.StartIPRotationRequest\x1a\x1e.google.container.v1.Operation"\xbf\x01\x82\xd3\xe4\x93\x02\x94\x01"`__ (e.g. - ``n1-standard-1``). If unspecified, the default machine type - is ``n1-standard-1``. - disk_size_gb: - Size of the disk attached to each node, specified in GB. 
The - smallest allowed disk size is 10GB. If unspecified, the - default disk size is 100GB. - oauth_scopes: - The set of Google API scopes to be made available on all of - the node VMs under the “default” service account. The - following scopes are recommended, but not required, and by - default are not included: - - ``https://www.googleapis.com/auth/compute`` is required for - mounting persistent storage on your nodes. - - ``https://www.googleapis.com/auth/devstorage.read_only`` is - required for communicating with **gcr.io** (the `Google - Container Registry `__). If unspecified, no scopes are added, unless - Cloud Logging or Cloud Monitoring are enabled, in which case - their required scopes will be added. - service_account: - The Google Cloud Platform Service Account to be used by the - node VMs. If no Service Account is specified, the “default” - service account is used. - metadata: - The metadata key/value pairs assigned to instances in the - cluster. Keys must conform to the regexp [a-zA-Z0-9-_]+ and - be less than 128 bytes in length. These are reflected as part - of a URL in the metadata server. Additionally, to avoid - ambiguity, keys must not conflict with any other metadata keys - for the project or be one of the reserved keys: “cluster- - location” “cluster-name” “cluster-uid” “configure-sh” - “containerd-configure-sh” “enable-os-login” “gci-update- - strategy” “gci-ensure-gke-docker” “instance-template” “kube- - env” “startup-script” “user-data” “disable-address-manager” - “windows-startup-script-ps1” “common-psm1” “k8s-node-setup- - psm1” “install-ssh-psm1” “user-profile-psm1” “serial-port- - logging-enable” Values are free-form strings, and only have - meaning as interpreted by the image running in the instance. - The only restriction placed on them is that each value’s size - must be less than or equal to 32 KB. The total size of all - keys and values must be less than 512 KB. - image_type: - The image type to use for this node. 
Note that for a given - image type, the latest version of it will be used. - labels: - The map of Kubernetes labels (key/value pairs) to be applied - to each node. These will added in addition to any default - label(s) that Kubernetes may apply to the node. In case of - conflict in label keys, the applied set may differ depending - on the Kubernetes version – it’s best to assume the behavior - is undefined and conflicts should be avoided. For more - information, including usage and the valid values, see: - https://kubernetes.io/docs/concepts/overview/working-with- - objects/labels/ - local_ssd_count: - The number of local SSD disks to be attached to the node. The - limit for this value is dependent upon the maximum number of - disks available on a machine per zone. See: - https://cloud.google.com/compute/docs/disks/local-ssd for more - information. - tags: - The list of instance tags applied to all nodes. Tags are used - to identify valid sources or targets for network firewalls and - are specified by the client during cluster or node pool - creation. Each tag within the list must comply with RFC1035. - preemptible: - Whether the nodes are created as preemptible VM instances. - See: - https://cloud.google.com/compute/docs/instances/preemptible - for more information about preemptible VM instances. - accelerators: - A list of hardware accelerators to be attached to each node. - See https://cloud.google.com/compute/docs/gpus for more - information about support for GPUs. - disk_type: - Type of the disk attached to each node (e.g. ‘pd-standard’ or - ‘pd-ssd’) If unspecified, the default disk type is ‘pd- - standard’ - min_cpu_platform: - Minimum CPU platform to be used by this instance. The instance - may be scheduled on the specified or newer CPU platform. - Applicable values are the friendly names of CPU platforms, - such as minCpuPlatform: “Intel Haswell” or minCpuPlatform: - “Intel Sandy Bridge”. 
For more information, read `how to - specify min CPU platform - `__ - taints: - List of kubernetes taints to be applied to each node. For - more information, including usage and the valid values, see: - https://kubernetes.io/docs/concepts/configuration/taint-and- - toleration/ - shielded_instance_config: - Shielded Instance options. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.NodeConfig) - }, -) -_sym_db.RegisterMessage(NodeConfig) -_sym_db.RegisterMessage(NodeConfig.MetadataEntry) -_sym_db.RegisterMessage(NodeConfig.LabelsEntry) - -ShieldedInstanceConfig = _reflection.GeneratedProtocolMessageType( - "ShieldedInstanceConfig", - (_message.Message,), - { - "DESCRIPTOR": _SHIELDEDINSTANCECONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """A set of Shielded Instance options. - - Attributes: - enable_secure_boot: - Defines whether the instance has Secure Boot enabled. Secure - Boot helps ensure that the system only runs authentic software - by verifying the digital signature of all boot components, and - halting the boot process if signature verification fails. - enable_integrity_monitoring: - Defines whether the instance has integrity monitoring enabled. - Enables monitoring and attestation of the boot integrity of - the instance. The attestation is performed against the - integrity policy baseline. This baseline is initially derived - from the implicitly trusted boot image when the instance is - created. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ShieldedInstanceConfig) - }, -) -_sym_db.RegisterMessage(ShieldedInstanceConfig) - -NodeTaint = _reflection.GeneratedProtocolMessageType( - "NodeTaint", - (_message.Message,), - { - "DESCRIPTOR": _NODETAINT, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Kubernetes taint is comprised of three fields: key, value, and effect. 
- Effect can only be one of three types: NoSchedule, PreferNoSchedule or - NoExecute. For more information, including usage and the valid - values, see: https://kubernetes.io/docs/concepts/configuration/taint- - and-toleration/ - - Attributes: - key: - Key for taint. - value: - Value for taint. - effect: - Effect for taint. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.NodeTaint) - }, -) -_sym_db.RegisterMessage(NodeTaint) - -MasterAuth = _reflection.GeneratedProtocolMessageType( - "MasterAuth", - (_message.Message,), - { - "DESCRIPTOR": _MASTERAUTH, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """The authentication information for accessing the master endpoint. - Authentication can be done using HTTP basic auth or using client - certificates. - - Attributes: - username: - The username to use for HTTP basic authentication to the - master endpoint. For clusters v1.6.0 and later, basic - authentication can be disabled by leaving username unspecified - (or setting it to the empty string). - password: - The password to use for HTTP basic authentication to the - master endpoint. Because the master endpoint is open to the - Internet, you should create a strong password. If a password - is provided for cluster creation, username must be non-empty. - client_certificate_config: - Configuration for client certificate authentication on the - cluster. For clusters before v1.12, if no configuration is - specified, a client certificate is issued. - cluster_ca_certificate: - [Output only] Base64-encoded public certificate that is the - root of trust for the cluster. - client_certificate: - [Output only] Base64-encoded public certificate used by - clients to authenticate to the cluster endpoint. - client_key: - [Output only] Base64-encoded private key used by clients to - authenticate to the cluster endpoint. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.MasterAuth) - }, -) -_sym_db.RegisterMessage(MasterAuth) - -ClientCertificateConfig = _reflection.GeneratedProtocolMessageType( - "ClientCertificateConfig", - (_message.Message,), - { - "DESCRIPTOR": _CLIENTCERTIFICATECONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration for client certificates on the cluster. - - Attributes: - issue_client_certificate: - Issue a client certificate. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ClientCertificateConfig) - }, -) -_sym_db.RegisterMessage(ClientCertificateConfig) - -AddonsConfig = _reflection.GeneratedProtocolMessageType( - "AddonsConfig", - (_message.Message,), - { - "DESCRIPTOR": _ADDONSCONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration for the addons that can be automatically spun up in the - cluster, enabling additional functionality. - - Attributes: - http_load_balancing: - Configuration for the HTTP (L7) load balancing controller - addon, which makes it easy to set up HTTP load balancers for - services in a cluster. - horizontal_pod_autoscaling: - Configuration for the horizontal pod autoscaling feature, - which increases or decreases the number of replica pods a - replication controller has based on the resource usage of the - existing pods. - kubernetes_dashboard: - Configuration for the Kubernetes Dashboard. This addon is - deprecated, and will be disabled in 1.15. It is recommended to - use the Cloud Console to manage and monitor your Kubernetes - clusters, workloads and applications. For more information, - see: https://cloud.google.com/kubernetes- - engine/docs/concepts/dashboards - network_policy_config: - Configuration for NetworkPolicy. This only tracks whether the - addon is enabled or not on the Master, it does not track - whether network policy is enabled for the nodes. 
- cloud_run_config: - Configuration for the Cloud Run addon, which allows the user - to use a managed Knative service. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.AddonsConfig) - }, -) -_sym_db.RegisterMessage(AddonsConfig) - -HttpLoadBalancing = _reflection.GeneratedProtocolMessageType( - "HttpLoadBalancing", - (_message.Message,), - { - "DESCRIPTOR": _HTTPLOADBALANCING, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration options for the HTTP (L7) load balancing controller - addon, which makes it easy to set up HTTP load balancers for services - in a cluster. - - Attributes: - disabled: - Whether the HTTP Load Balancing controller is enabled in the - cluster. When enabled, it runs a small pod in the cluster that - manages the load balancers. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.HttpLoadBalancing) - }, -) -_sym_db.RegisterMessage(HttpLoadBalancing) - -HorizontalPodAutoscaling = _reflection.GeneratedProtocolMessageType( - "HorizontalPodAutoscaling", - (_message.Message,), - { - "DESCRIPTOR": _HORIZONTALPODAUTOSCALING, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration options for the horizontal pod autoscaling feature, - which increases or decreases the number of replica pods a replication - controller has based on the resource usage of the existing pods. - - Attributes: - disabled: - Whether the Horizontal Pod Autoscaling feature is enabled in - the cluster. When enabled, it ensures that a Heapster pod is - running in the cluster, which is also used by the Cloud - Monitoring service. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.HorizontalPodAutoscaling) - }, -) -_sym_db.RegisterMessage(HorizontalPodAutoscaling) - -KubernetesDashboard = _reflection.GeneratedProtocolMessageType( - "KubernetesDashboard", - (_message.Message,), - { - "DESCRIPTOR": _KUBERNETESDASHBOARD, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration for the Kubernetes Dashboard. - - Attributes: - disabled: - Whether the Kubernetes Dashboard is enabled for this cluster. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.KubernetesDashboard) - }, -) -_sym_db.RegisterMessage(KubernetesDashboard) - -NetworkPolicyConfig = _reflection.GeneratedProtocolMessageType( - "NetworkPolicyConfig", - (_message.Message,), - { - "DESCRIPTOR": _NETWORKPOLICYCONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration for NetworkPolicy. This only tracks whether the addon is - enabled or not on the Master, it does not track whether network policy - is enabled for the nodes. - - Attributes: - disabled: - Whether NetworkPolicy is enabled for this cluster. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.NetworkPolicyConfig) - }, -) -_sym_db.RegisterMessage(NetworkPolicyConfig) - -PrivateClusterConfig = _reflection.GeneratedProtocolMessageType( - "PrivateClusterConfig", - (_message.Message,), - { - "DESCRIPTOR": _PRIVATECLUSTERCONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration options for private clusters. - - Attributes: - enable_private_nodes: - Whether nodes have internal IP addresses only. If enabled, all - nodes are given only RFC 1918 private addresses and - communicate with the master via private networking. - enable_private_endpoint: - Whether the master’s internal IP address is used as the - cluster endpoint. 
- master_ipv4_cidr_block: - The IP range in CIDR notation to use for the hosted master - network. This range will be used for assigning internal IP - addresses to the master or set of masters, as well as the ILB - VIP. This range must not overlap with any other ranges in use - within the cluster’s network. - private_endpoint: - Output only. The internal IP address of this cluster’s master - endpoint. - public_endpoint: - Output only. The external IP address of this cluster’s master - endpoint. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.PrivateClusterConfig) - }, -) -_sym_db.RegisterMessage(PrivateClusterConfig) - -AuthenticatorGroupsConfig = _reflection.GeneratedProtocolMessageType( - "AuthenticatorGroupsConfig", - (_message.Message,), - { - "DESCRIPTOR": _AUTHENTICATORGROUPSCONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration for returning group information from authenticators. - - Attributes: - enabled: - Whether this cluster should return group membership lookups - during authentication using a group of security groups. - security_group: - The name of the security group-of-groups to be used. Only - relevant if enabled = true. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.AuthenticatorGroupsConfig) - }, -) -_sym_db.RegisterMessage(AuthenticatorGroupsConfig) - -CloudRunConfig = _reflection.GeneratedProtocolMessageType( - "CloudRunConfig", - (_message.Message,), - { - "DESCRIPTOR": _CLOUDRUNCONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration options for the Cloud Run feature. - - Attributes: - disabled: - Whether Cloud Run addon is enabled for this cluster. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.CloudRunConfig) - }, -) -_sym_db.RegisterMessage(CloudRunConfig) - -MasterAuthorizedNetworksConfig = _reflection.GeneratedProtocolMessageType( - "MasterAuthorizedNetworksConfig", - (_message.Message,), - { - "CidrBlock": _reflection.GeneratedProtocolMessageType( - "CidrBlock", - (_message.Message,), - { - "DESCRIPTOR": _MASTERAUTHORIZEDNETWORKSCONFIG_CIDRBLOCK, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """CidrBlock contains an optional name and one CIDR block. - - Attributes: - display_name: - display_name is an optional field for users to identify CIDR - blocks. - cidr_block: - cidr_block must be specified in CIDR notation. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.MasterAuthorizedNetworksConfig.CidrBlock) - }, - ), - "DESCRIPTOR": _MASTERAUTHORIZEDNETWORKSCONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration options for the master authorized networks feature. - Enabled master authorized networks will disallow all external traffic - to access Kubernetes master through HTTPS except traffic from the - given CIDR blocks, Google Compute Engine Public IPs and Google Prod - IPs. - - Attributes: - enabled: - Whether or not master authorized networks is enabled. - cidr_blocks: - cidr_blocks define up to 50 external networks that could - access Kubernetes master through HTTPS. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.MasterAuthorizedNetworksConfig) - }, -) -_sym_db.RegisterMessage(MasterAuthorizedNetworksConfig) -_sym_db.RegisterMessage(MasterAuthorizedNetworksConfig.CidrBlock) - -LegacyAbac = _reflection.GeneratedProtocolMessageType( - "LegacyAbac", - (_message.Message,), - { - "DESCRIPTOR": _LEGACYABAC, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration for the legacy Attribute Based Access Control - authorization mode. - - Attributes: - enabled: - Whether the ABAC authorizer is enabled for this cluster. When - enabled, identities in the system, including service accounts, - nodes, and controllers, will have statically granted - permissions beyond those provided by the RBAC configuration or - IAM. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.LegacyAbac) - }, -) -_sym_db.RegisterMessage(LegacyAbac) - -NetworkPolicy = _reflection.GeneratedProtocolMessageType( - "NetworkPolicy", - (_message.Message,), - { - "DESCRIPTOR": _NETWORKPOLICY, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration options for the NetworkPolicy feature. - https://kubernetes.io/docs/concepts/services- - networking/networkpolicies/ - - Attributes: - provider: - The selected network policy provider. - enabled: - Whether network policy is enabled on the cluster. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.NetworkPolicy) - }, -) -_sym_db.RegisterMessage(NetworkPolicy) - -BinaryAuthorization = _reflection.GeneratedProtocolMessageType( - "BinaryAuthorization", - (_message.Message,), - { - "DESCRIPTOR": _BINARYAUTHORIZATION, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration for Binary Authorization. - - Attributes: - enabled: - Enable Binary Authorization for this cluster. If enabled, all - container images will be validated by Binary Authorization. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.BinaryAuthorization) - }, -) -_sym_db.RegisterMessage(BinaryAuthorization) - -IPAllocationPolicy = _reflection.GeneratedProtocolMessageType( - "IPAllocationPolicy", - (_message.Message,), - { - "DESCRIPTOR": _IPALLOCATIONPOLICY, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration for controlling how IPs are allocated in the cluster. - - Attributes: - use_ip_aliases: - Whether alias IPs will be used for pod IPs in the cluster. - create_subnetwork: - Whether a new subnetwork will be created automatically for the - cluster. This field is only applicable when - ``use_ip_aliases`` is true. - subnetwork_name: - A custom subnetwork name to be used if ``create_subnetwork`` - is true. If this field is empty, then an automatic name will - be chosen for the new subnetwork. - cluster_ipv4_cidr: - This field is deprecated, use cluster_ipv4_cidr_block. - node_ipv4_cidr: - This field is deprecated, use node_ipv4_cidr_block. - services_ipv4_cidr: - This field is deprecated, use services_ipv4_cidr_block. - cluster_secondary_range_name: - The name of the secondary range to be used for the cluster - CIDR block. The secondary range will be used for pod IP - addresses. This must be an existing secondary range associated - with the cluster subnetwork. This field is only applicable - with use_ip_aliases is true and create_subnetwork is false. - services_secondary_range_name: - The name of the secondary range to be used as for the services - CIDR block. The secondary range will be used for service - ClusterIPs. This must be an existing secondary range - associated with the cluster subnetwork. This field is only - applicable with use_ip_aliases is true and create_subnetwork - is false. - cluster_ipv4_cidr_block: - The IP address range for the cluster pod IPs. If this field is - set, then ``cluster.cluster_ipv4_cidr`` must be left blank. 
- This field is only applicable when ``use_ip_aliases`` is true. - Set to blank to have a range chosen with the default size. - Set to /netmask (e.g. ``/14``) to have a range chosen with a - specific netmask. Set to a `CIDR - `__ notation (e.g. ``10.96.0.0/14``) from the - RFC-1918 private networks (e.g. ``10.0.0.0/8``, - ``172.16.0.0/12``, ``192.168.0.0/16``) to pick a specific - range to use. - node_ipv4_cidr_block: - The IP address range of the instance IPs in this cluster. - This is applicable only if ``create_subnetwork`` is true. Set - to blank to have a range chosen with the default size. Set to - /netmask (e.g. ``/14``) to have a range chosen with a specific - netmask. Set to a `CIDR - `__ notation (e.g. ``10.96.0.0/14``) from the - RFC-1918 private networks (e.g. ``10.0.0.0/8``, - ``172.16.0.0/12``, ``192.168.0.0/16``) to pick a specific - range to use. - services_ipv4_cidr_block: - The IP address range of the services IPs in this cluster. If - blank, a range will be automatically chosen with the default - size. This field is only applicable when ``use_ip_aliases`` - is true. Set to blank to have a range chosen with the default - size. Set to /netmask (e.g. ``/14``) to have a range chosen - with a specific netmask. Set to a `CIDR - `__ notation (e.g. ``10.96.0.0/14``) from the - RFC-1918 private networks (e.g. ``10.0.0.0/8``, - ``172.16.0.0/12``, ``192.168.0.0/16``) to pick a specific - range to use. - tpu_ipv4_cidr_block: - The IP address range of the Cloud TPUs in this cluster. If - unspecified, a range will be automatically chosen with the - default size. This field is only applicable when - ``use_ip_aliases`` is true. If unspecified, the range will - use the default size. Set to /netmask (e.g. ``/14``) to have - a range chosen with a specific netmask. Set to a `CIDR - `__ notation (e.g. ``10.96.0.0/14``) from the - RFC-1918 private networks (e.g. ``10.0.0.0/8``, - ``172.16.0.0/12``, ``192.168.0.0/16``) to pick a specific - range to use. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.IPAllocationPolicy) - }, -) -_sym_db.RegisterMessage(IPAllocationPolicy) - -Cluster = _reflection.GeneratedProtocolMessageType( - "Cluster", - (_message.Message,), - { - "ResourceLabelsEntry": _reflection.GeneratedProtocolMessageType( - "ResourceLabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTER_RESOURCELABELSENTRY, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2" - # @@protoc_insertion_point(class_scope:google.container.v1.Cluster.ResourceLabelsEntry) - }, - ), - "DESCRIPTOR": _CLUSTER, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """A Google Kubernetes Engine cluster. - - Attributes: - name: - The name of this cluster. The name must be unique within this - project and location (e.g. zone or region), and can be up to - 40 characters with the following restrictions: - Lowercase - letters, numbers, and hyphens only. - Must start with a - letter. - Must end with a number or a letter. - description: - An optional description of this cluster. - initial_node_count: - The number of nodes to create in this cluster. You must ensure - that your Compute Engine `resource quota - `__ is sufficient for - this number of instances. You must also have available - firewall and routes quota. For requests, this field should - only be used in lieu of a “node_pool” object, since this - configuration (along with the “node_config”) will be used to - create a “NodePool” object with an auto-generated name. Do not - use this and a node_pool at the same time. This field is - deprecated, use node_pool.initial_node_count instead. - node_config: - Parameters used in creating the cluster’s nodes. For requests, - this field should only be used in lieu of a “node_pool” - object, since this configuration (along with the - “initial_node_count”) will be used to create a “NodePool” - object with an auto-generated name. 
Do not use this and a - node_pool at the same time. For responses, this field will be - populated with the node configuration of the first node pool. - (For configuration of each node pool, see - ``node_pool.config``) If unspecified, the defaults are used. - This field is deprecated, use node_pool.config instead. - master_auth: - The authentication information for accessing the master - endpoint. If unspecified, the defaults are used: For clusters - before v1.12, if master_auth is unspecified, ``username`` will - be set to “admin”, a random password will be generated, and a - client certificate will be issued. - logging_service: - The logging service the cluster should use to write logs. - Currently available options: - - “logging.googleapis.com/kubernetes” - the Google Cloud Logging - service with Kubernetes-native resource model - - ``logging.googleapis.com`` - the Google Cloud Logging service. - - ``none`` - no logs will be exported from the cluster. - if - left as an empty string,\ ``logging.googleapis.com`` will be - used. - monitoring_service: - The monitoring service the cluster should use to write - metrics. Currently available options: - - ``monitoring.googleapis.com`` - the Google Cloud Monitoring - service. - ``none`` - no metrics will be exported from the - cluster. - if left as an empty string, - ``monitoring.googleapis.com`` will be used. - network: - The name of the Google Compute Engine `network - `__ to which the cluster is connected. If - left unspecified, the ``default`` network will be used. - cluster_ipv4_cidr: - The IP address range of the container pods in this cluster, in - `CIDR `__ notation (e.g. ``10.96.0.0/14``). Leave - blank to have one automatically chosen or specify a ``/14`` - block in ``10.0.0.0/8``. - addons_config: - Configurations for the various addons available to run in the - cluster. - subnetwork: - The name of the Google Compute Engine `subnetwork - `__ to - which the cluster is connected. 
- node_pools: - The node pools associated with this cluster. This field should - not be set if “node_config” or “initial_node_count” are - specified. - locations: - The list of Google Compute Engine `zones - `__ in - which the cluster’s nodes should be located. - enable_kubernetes_alpha: - Kubernetes alpha features are enabled on this cluster. This - includes alpha API groups (e.g. v1alpha1) and features that - may not be production ready in the kubernetes version of the - master and nodes. The cluster has no SLA for uptime and - master/node upgrades are disabled. Alpha enabled clusters are - automatically deleted thirty days after creation. - resource_labels: - The resource labels for the cluster to use to annotate any - related Google Compute Engine resources. - label_fingerprint: - The fingerprint of the set of labels for this cluster. - legacy_abac: - Configuration for the legacy ABAC authorization mode. - network_policy: - Configuration options for the NetworkPolicy feature. - ip_allocation_policy: - Configuration for cluster IP allocation. - master_authorized_networks_config: - The configuration options for master authorized networks - feature. - maintenance_policy: - Configure the maintenance policy for this cluster. - binary_authorization: - Configuration for Binary Authorization. - autoscaling: - Cluster-level autoscaling configuration. - network_config: - Configuration for cluster networking. - default_max_pods_constraint: - The default constraint on the maximum number of pods that can - be run simultaneously on a node in the node pool of this - cluster. Only honored if cluster created with IP Alias - support. - resource_usage_export_config: - Configuration for exporting resource usages. Resource usage - export is disabled when this config is unspecified. - authenticator_groups_config: - Configuration controlling RBAC group membership information. - private_cluster_config: - Configuration for private cluster. 
- database_encryption: - Configuration of etcd encryption. - vertical_pod_autoscaling: - Cluster-level Vertical Pod Autoscaling configuration. - self_link: - [Output only] Server-defined URL for the resource. - zone: - [Output only] The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field is deprecated, use - location instead. - endpoint: - [Output only] The IP address of this cluster’s master - endpoint. The endpoint can be accessed from the internet at - ``https://username:password@endpoint/``. See the - ``masterAuth`` property of this resource for username and - password information. - initial_cluster_version: - The initial Kubernetes version for this cluster. Valid - versions are those found in validMasterVersions returned by - getServerConfig. The version can be upgraded over time; such - upgrades are reflected in currentMasterVersion and - currentNodeVersion. Users may specify either explicit - versions offered by Kubernetes Engine or version aliases, - which have the following behavior: - “latest”: picks the - highest valid Kubernetes version - “1.X”: picks the highest - valid patch+gke.N patch in the 1.X version - “1.X.Y”: picks - the highest valid gke.N patch in the 1.X.Y version - - “1.X.Y-gke.N”: picks an explicit Kubernetes version - "“,”-": - picks the default Kubernetes version - current_master_version: - [Output only] The current software version of the master - endpoint. - current_node_version: - [Output only] Deprecated, use `NodePools.version - `__ instead. The - current version of the node software components. If they are - currently at multiple versions because they’re in the process - of being upgraded, this reflects the minimum version of all - nodes. - create_time: - [Output only] The time the cluster was created, in `RFC3339 - `__ text format. - status: - [Output only] The current status of this cluster. 
- status_message: - [Output only] Additional information about the current status - of this cluster, if available. - node_ipv4_cidr_size: - [Output only] The size of the address space on each node for - hosting containers. This is provisioned from within the - ``container_ipv4_cidr`` range. This field will only be set - when cluster is in route-based network mode. - services_ipv4_cidr: - [Output only] The IP address range of the Kubernetes services - in this cluster, in `CIDR - `__ notation (e.g. ``1.2.3.4/29``). Service - addresses are typically put in the last ``/16`` from the - container CIDR. - instance_group_urls: - Deprecated. Use node_pools.instance_group_urls. - current_node_count: - [Output only] The number of nodes currently in the cluster. - Deprecated. Call Kubernetes API directly to retrieve node - information. - expire_time: - [Output only] The time the cluster will be automatically - deleted in `RFC3339 `__ - text format. - location: - [Output only] The name of the Google Compute Engine `zone - `__ or `region - `__ in which the cluster resides. - enable_tpu: - Enable the ability to use Cloud TPUs in this cluster. - tpu_ipv4_cidr_block: - [Output only] The IP address range of the Cloud TPUs in this - cluster, in `CIDR - `__ notation (e.g. ``1.2.3.4/29``). - conditions: - Which conditions caused the current cluster state. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.Cluster) - }, -) -_sym_db.RegisterMessage(Cluster) -_sym_db.RegisterMessage(Cluster.ResourceLabelsEntry) - -ClusterUpdate = _reflection.GeneratedProtocolMessageType( - "ClusterUpdate", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERUPDATE, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """ClusterUpdate describes an update to the cluster. Exactly one update - can be applied to a cluster with each request, so at most one field - can be provided. 
- - Attributes: - desired_node_version: - The Kubernetes version to change the nodes to (typically an - upgrade). Users may specify either explicit versions offered - by Kubernetes Engine or version aliases, which have the - following behavior: - “latest”: picks the highest valid - Kubernetes version - “1.X”: picks the highest valid - patch+gke.N patch in the 1.X version - “1.X.Y”: picks the - highest valid gke.N patch in the 1.X.Y version - - “1.X.Y-gke.N”: picks an explicit Kubernetes version - “-”: - picks the Kubernetes master version - desired_monitoring_service: - The monitoring service the cluster should use to write - metrics. Currently available options: - - “monitoring.googleapis.com/kubernetes” - the Google Cloud - Monitoring service with Kubernetes-native resource model - - “monitoring.googleapis.com” - the Google Cloud Monitoring - service - “none” - no metrics will be exported from the - cluster - desired_addons_config: - Configurations for the various addons available to run in the - cluster. - desired_node_pool_id: - The node pool to be upgraded. This field is mandatory if - “desired_node_version”, “desired_image_family” or - “desired_node_pool_autoscaling” is specified and there is more - than one node pool on the cluster. - desired_image_type: - The desired image type for the node pool. NOTE: Set the - “desired_node_pool” field as well. - desired_database_encryption: - Configuration of etcd encryption. - desired_node_pool_autoscaling: - Autoscaler configuration for the node pool specified in - desired_node_pool_id. If there is only one pool in the cluster - and desired_node_pool_id is not provided then the change - applies to that single node pool. - desired_locations: - The desired list of Google Compute Engine `zones - `__ in - which the cluster’s nodes should be located. Changing the - locations a cluster is in will result in nodes being either - created or removed from the cluster, depending on whether - locations are being added or removed. 
This list must always - include the cluster’s primary zone. - desired_master_authorized_networks_config: - The desired configuration options for master authorized - networks feature. - desired_cluster_autoscaling: - Cluster-level autoscaling configuration. - desired_binary_authorization: - The desired configuration options for the Binary Authorization - feature. - desired_logging_service: - The logging service the cluster should use to write logs. - Currently available options: - - “logging.googleapis.com/kubernetes” - the Google Cloud Logging - service with Kubernetes-native resource model - - “logging.googleapis.com” - the Google Cloud Logging service - - “none” - no logs will be exported from the cluster - desired_resource_usage_export_config: - The desired configuration for exporting resource usage. - desired_vertical_pod_autoscaling: - Cluster-level Vertical Pod Autoscaling configuration. - desired_intra_node_visibility_config: - The desired config of Intra-node visibility. - desired_master_version: - The Kubernetes version to change the master to. Users may - specify either explicit versions offered by Kubernetes Engine - or version aliases, which have the following behavior: - - “latest”: picks the highest valid Kubernetes version - “1.X”: - picks the highest valid patch+gke.N patch in the 1.X version - - “1.X.Y”: picks the highest valid gke.N patch in the 1.X.Y - version - “1.X.Y-gke.N”: picks an explicit Kubernetes version - - “-”: picks the default Kubernetes version - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ClusterUpdate) - }, -) -_sym_db.RegisterMessage(ClusterUpdate) - -Operation = _reflection.GeneratedProtocolMessageType( - "Operation", - (_message.Message,), - { - "DESCRIPTOR": _OPERATION, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """This operation resource represents operations that may have happened - or are happening on the cluster. All fields are output only. 
- - Attributes: - name: - The server-assigned ID for the operation. - zone: - The name of the Google Compute Engine `zone - `__ in - which the operation is taking place. This field is deprecated, - use location instead. - operation_type: - The operation type. - status: - The current status of the operation. - detail: - Detailed operation progress, if available. - status_message: - If an error has occurred, a textual description of the error. - self_link: - Server-defined URL for the resource. - target_link: - Server-defined URL for the target of the operation. - location: - [Output only] The name of the Google Compute Engine `zone - `__ or `region - `__ in which the cluster resides. - start_time: - [Output only] The time the operation started, in `RFC3339 - `__ text format. - end_time: - [Output only] The time the operation completed, in `RFC3339 - `__ text format. - cluster_conditions: - Which conditions caused the current cluster state. - nodepool_conditions: - Which conditions caused the current node pool state. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.Operation) - }, -) -_sym_db.RegisterMessage(Operation) - -CreateClusterRequest = _reflection.GeneratedProtocolMessageType( - "CreateClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATECLUSTERREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """CreateClusterRequest creates a cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the parent field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the parent field. - cluster: - Required. A `cluster resource - `__ - parent: - The parent (project and location) where the cluster will be - created. Specified in the format ``projects/*/locations/*``. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.CreateClusterRequest) - }, -) -_sym_db.RegisterMessage(CreateClusterRequest) - -GetClusterRequest = _reflection.GeneratedProtocolMessageType( - "GetClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETCLUSTERREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """GetClusterRequest gets the settings of a cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to retrieve. This field - has been deprecated and replaced by the name field. - name: - The name (project, location, cluster) of the cluster to - retrieve. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.GetClusterRequest) - }, -) -_sym_db.RegisterMessage(GetClusterRequest) - -UpdateClusterRequest = _reflection.GeneratedProtocolMessageType( - "UpdateClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATECLUSTERREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """UpdateClusterRequest updates the settings of a cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to upgrade. This field has - been deprecated and replaced by the name field. - update: - Required. 
A description of the update. - name: - The name (project, location, cluster) of the cluster to - update. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.UpdateClusterRequest) - }, -) -_sym_db.RegisterMessage(UpdateClusterRequest) - -UpdateNodePoolRequest = _reflection.GeneratedProtocolMessageType( - "UpdateNodePoolRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATENODEPOOLREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """UpdateNodePoolRequests update a node pool’s image and/or version. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to upgrade. This field has - been deprecated and replaced by the name field. - node_pool_id: - Deprecated. The name of the node pool to upgrade. This field - has been deprecated and replaced by the name field. - node_version: - Required. The Kubernetes version to change the nodes to - (typically an upgrade). Users may specify either explicit - versions offered by Kubernetes Engine or version aliases, - which have the following behavior: - “latest”: picks the - highest valid Kubernetes version - “1.X”: picks the highest - valid patch+gke.N patch in the 1.X version - “1.X.Y”: picks - the highest valid gke.N patch in the 1.X.Y version - - “1.X.Y-gke.N”: picks an explicit Kubernetes version - “-”: - picks the Kubernetes master version - image_type: - Required. The desired image type for the node pool. - name: - The name (project, location, cluster, node pool) of the node - pool to update. 
Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.UpdateNodePoolRequest) - }, -) -_sym_db.RegisterMessage(UpdateNodePoolRequest) - -SetNodePoolAutoscalingRequest = _reflection.GeneratedProtocolMessageType( - "SetNodePoolAutoscalingRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETNODEPOOLAUTOSCALINGREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """SetNodePoolAutoscalingRequest sets the autoscaler settings of a node - pool. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to upgrade. This field has - been deprecated and replaced by the name field. - node_pool_id: - Deprecated. The name of the node pool to upgrade. This field - has been deprecated and replaced by the name field. - autoscaling: - Required. Autoscaling configuration for the node pool. - name: - The name (project, location, cluster, node pool) of the node - pool to set autoscaler settings. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.SetNodePoolAutoscalingRequest) - }, -) -_sym_db.RegisterMessage(SetNodePoolAutoscalingRequest) - -SetLoggingServiceRequest = _reflection.GeneratedProtocolMessageType( - "SetLoggingServiceRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETLOGGINGSERVICEREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """SetLoggingServiceRequest sets the logging service of a cluster. - - Attributes: - project_id: - Deprecated. 
The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to upgrade. This field has - been deprecated and replaced by the name field. - logging_service: - Required. The logging service the cluster should use to write - metrics. Currently available options: - - “logging.googleapis.com” - the Google Cloud Logging service - - “none” - no metrics will be exported from the cluster - name: - The name (project, location, cluster) of the cluster to set - logging. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.SetLoggingServiceRequest) - }, -) -_sym_db.RegisterMessage(SetLoggingServiceRequest) - -SetMonitoringServiceRequest = _reflection.GeneratedProtocolMessageType( - "SetMonitoringServiceRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETMONITORINGSERVICEREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """SetMonitoringServiceRequest sets the monitoring service of a cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to upgrade. This field has - been deprecated and replaced by the name field. - monitoring_service: - Required. The monitoring service the cluster should use to - write metrics. 
Currently available options: - - “monitoring.googleapis.com/kubernetes” - the Google Cloud - Monitoring service with Kubernetes-native resource model - - “monitoring.googleapis.com” - the Google Cloud Monitoring - service - “none” - no metrics will be exported from the - cluster - name: - The name (project, location, cluster) of the cluster to set - monitoring. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.SetMonitoringServiceRequest) - }, -) -_sym_db.RegisterMessage(SetMonitoringServiceRequest) - -SetAddonsConfigRequest = _reflection.GeneratedProtocolMessageType( - "SetAddonsConfigRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETADDONSCONFIGREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """SetAddonsConfigRequest sets the addons associated with the cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to upgrade. This field has - been deprecated and replaced by the name field. - addons_config: - Required. The desired configurations for the various addons - available to run in the cluster. - name: - The name (project, location, cluster) of the cluster to set - addons. Specified in the format - ``projects/*/locations/*/clusters/*``. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.SetAddonsConfigRequest) - }, -) -_sym_db.RegisterMessage(SetAddonsConfigRequest) - -SetLocationsRequest = _reflection.GeneratedProtocolMessageType( - "SetLocationsRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETLOCATIONSREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """SetLocationsRequest sets the locations of the cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to upgrade. This field has - been deprecated and replaced by the name field. - locations: - Required. The desired list of Google Compute Engine `zones - `__ in - which the cluster’s nodes should be located. Changing the - locations a cluster is in will result in nodes being either - created or removed from the cluster, depending on whether - locations are being added or removed. This list must always - include the cluster’s primary zone. - name: - The name (project, location, cluster) of the cluster to set - locations. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.SetLocationsRequest) - }, -) -_sym_db.RegisterMessage(SetLocationsRequest) - -UpdateMasterRequest = _reflection.GeneratedProtocolMessageType( - "UpdateMasterRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEMASTERREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """UpdateMasterRequest updates the master of the cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. 
This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to upgrade. This field has - been deprecated and replaced by the name field. - master_version: - Required. The Kubernetes version to change the master to. - Users may specify either explicit versions offered by - Kubernetes Engine or version aliases, which have the following - behavior: - “latest”: picks the highest valid Kubernetes - version - “1.X”: picks the highest valid patch+gke.N patch in - the 1.X version - “1.X.Y”: picks the highest valid gke.N - patch in the 1.X.Y version - “1.X.Y-gke.N”: picks an explicit - Kubernetes version - “-”: picks the default Kubernetes - version - name: - The name (project, location, cluster) of the cluster to - update. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.UpdateMasterRequest) - }, -) -_sym_db.RegisterMessage(UpdateMasterRequest) - -SetMasterAuthRequest = _reflection.GeneratedProtocolMessageType( - "SetMasterAuthRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETMASTERAUTHREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """SetMasterAuthRequest updates the admin password of a cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to upgrade. This field has - been deprecated and replaced by the name field. - action: - Required. 
The exact form of action to be taken on the master - auth. - update: - Required. A description of the update. - name: - The name (project, location, cluster) of the cluster to set - auth. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.SetMasterAuthRequest) - }, -) -_sym_db.RegisterMessage(SetMasterAuthRequest) - -DeleteClusterRequest = _reflection.GeneratedProtocolMessageType( - "DeleteClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETECLUSTERREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """DeleteClusterRequest deletes a cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to delete. This field has - been deprecated and replaced by the name field. - name: - The name (project, location, cluster) of the cluster to - delete. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.DeleteClusterRequest) - }, -) -_sym_db.RegisterMessage(DeleteClusterRequest) - -ListClustersRequest = _reflection.GeneratedProtocolMessageType( - "ListClustersRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """ListClustersRequest lists clusters. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the parent field. - zone: - Deprecated. 
The name of the Google Compute Engine `zone - `__ in - which the cluster resides, or “-” for all zones. This field - has been deprecated and replaced by the parent field. - parent: - The parent (project and location) where the clusters will be - listed. Specified in the format ``projects/*/locations/*``. - Location “-” matches all zones and all regions. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ListClustersRequest) - }, -) -_sym_db.RegisterMessage(ListClustersRequest) - -ListClustersResponse = _reflection.GeneratedProtocolMessageType( - "ListClustersResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSRESPONSE, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """ListClustersResponse is the result of ListClustersRequest. - - Attributes: - clusters: - A list of clusters in the project in the specified zone, or - across all ones. - missing_zones: - If any zones are listed here, the list of clusters returned - may be missing those zones. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ListClustersResponse) - }, -) -_sym_db.RegisterMessage(ListClustersResponse) - -GetOperationRequest = _reflection.GeneratedProtocolMessageType( - "GetOperationRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETOPERATIONREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """GetOperationRequest gets a single operation. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - operation_id: - Deprecated. The server-assigned ``name`` of the operation. - This field has been deprecated and replaced by the name field. 
- name: - The name (project, location, operation id) of the operation to - get. Specified in the format - ``projects/*/locations/*/operations/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.GetOperationRequest) - }, -) -_sym_db.RegisterMessage(GetOperationRequest) - -ListOperationsRequest = _reflection.GeneratedProtocolMessageType( - "ListOperationsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTOPERATIONSREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """ListOperationsRequest lists operations. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the parent field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ to - return operations for, or ``-`` for all zones. This field has - been deprecated and replaced by the parent field. - parent: - The parent (project and location) where the operations will be - listed. Specified in the format ``projects/*/locations/*``. - Location “-” matches all zones and all regions. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ListOperationsRequest) - }, -) -_sym_db.RegisterMessage(ListOperationsRequest) - -CancelOperationRequest = _reflection.GeneratedProtocolMessageType( - "CancelOperationRequest", - (_message.Message,), - { - "DESCRIPTOR": _CANCELOPERATIONREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """CancelOperationRequest cancels a single operation. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the operation resides. This field has been deprecated - and replaced by the name field. - operation_id: - Deprecated. 
The server-assigned ``name`` of the operation. - This field has been deprecated and replaced by the name field. - name: - The name (project, location, operation id) of the operation to - cancel. Specified in the format - ``projects/*/locations/*/operations/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.CancelOperationRequest) - }, -) -_sym_db.RegisterMessage(CancelOperationRequest) - -ListOperationsResponse = _reflection.GeneratedProtocolMessageType( - "ListOperationsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTOPERATIONSRESPONSE, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """ListOperationsResponse is the result of ListOperationsRequest. - - Attributes: - operations: - A list of operations in the project in the specified zone. - missing_zones: - If any zones are listed here, the list of operations returned - may be missing the operations from those zones. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ListOperationsResponse) - }, -) -_sym_db.RegisterMessage(ListOperationsResponse) - -GetServerConfigRequest = _reflection.GeneratedProtocolMessageType( - "GetServerConfigRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETSERVERCONFIGREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Gets the current Kubernetes Engine service configuration. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ to - return operations for. This field has been deprecated and - replaced by the name field. - name: - The name (project and location) of the server config to get, - specified in the format ``projects/*/locations/*``. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.GetServerConfigRequest) - }, -) -_sym_db.RegisterMessage(GetServerConfigRequest) - -ServerConfig = _reflection.GeneratedProtocolMessageType( - "ServerConfig", - (_message.Message,), - { - "DESCRIPTOR": _SERVERCONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Kubernetes Engine service configuration. - - Attributes: - default_cluster_version: - Version of Kubernetes the service deploys by default. - valid_node_versions: - List of valid node upgrade target versions. - default_image_type: - Default image type. - valid_image_types: - List of valid image types. - valid_master_versions: - List of valid master versions. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ServerConfig) - }, -) -_sym_db.RegisterMessage(ServerConfig) - -CreateNodePoolRequest = _reflection.GeneratedProtocolMessageType( - "CreateNodePoolRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATENODEPOOLREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """CreateNodePoolRequest creates a node pool for a cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number `__. This field has been deprecated and - replaced by the parent field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the parent field. - cluster_id: - Deprecated. The name of the cluster. This field has been - deprecated and replaced by the parent field. - node_pool: - Required. The node pool to create. - parent: - The parent (project, location, cluster id) where the node pool - will be created. Specified in the format - ``projects/*/locations/*/clusters/*``. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.CreateNodePoolRequest) - }, -) -_sym_db.RegisterMessage(CreateNodePoolRequest) - -DeleteNodePoolRequest = _reflection.GeneratedProtocolMessageType( - "DeleteNodePoolRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETENODEPOOLREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """DeleteNodePoolRequest deletes a node pool for a cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number `__. This field has been deprecated and - replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster. This field has been - deprecated and replaced by the name field. - node_pool_id: - Deprecated. The name of the node pool to delete. This field - has been deprecated and replaced by the name field. - name: - The name (project, location, cluster, node pool id) of the - node pool to delete. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.DeleteNodePoolRequest) - }, -) -_sym_db.RegisterMessage(DeleteNodePoolRequest) - -ListNodePoolsRequest = _reflection.GeneratedProtocolMessageType( - "ListNodePoolsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTNODEPOOLSREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """ListNodePoolsRequest lists the node pool(s) for a cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number `__. This field has been deprecated and - replaced by the parent field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. 
This field has been deprecated and - replaced by the parent field. - cluster_id: - Deprecated. The name of the cluster. This field has been - deprecated and replaced by the parent field. - parent: - The parent (project, location, cluster id) where the node - pools will be listed. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ListNodePoolsRequest) - }, -) -_sym_db.RegisterMessage(ListNodePoolsRequest) - -GetNodePoolRequest = _reflection.GeneratedProtocolMessageType( - "GetNodePoolRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETNODEPOOLREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """GetNodePoolRequest retrieves a node pool for a cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number `__. This field has been deprecated and - replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster. This field has been - deprecated and replaced by the name field. - node_pool_id: - Deprecated. The name of the node pool. This field has been - deprecated and replaced by the name field. - name: - The name (project, location, cluster, node pool id) of the - node pool to get. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.GetNodePoolRequest) - }, -) -_sym_db.RegisterMessage(GetNodePoolRequest) - -NodePool = _reflection.GeneratedProtocolMessageType( - "NodePool", - (_message.Message,), - { - "DESCRIPTOR": _NODEPOOL, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """NodePool contains the name and configuration for a cluster’s node - pool. 
Node pools are a set of nodes (i.e. VM’s), with a common - configuration and specification, under the control of the cluster - master. They may have a set of Kubernetes labels applied to them, - which may be used to reference them during pod scheduling. They may - also be resized up or down, to accommodate the workload. - - Attributes: - name: - The name of the node pool. - config: - The node configuration of the pool. - initial_node_count: - The initial node count for the pool. You must ensure that your - Compute Engine `resource quota - `__ is sufficient for - this number of instances. You must also have available - firewall and routes quota. - self_link: - [Output only] Server-defined URL for the resource. - version: - The version of the Kubernetes of this node. - instance_group_urls: - [Output only] The resource URLs of the `managed instance - groups `__ associated - with this node pool. - status: - [Output only] The status of the nodes in this pool instance. - status_message: - [Output only] Additional information about the current status - of this node pool instance, if available. - autoscaling: - Autoscaler configuration for this NodePool. Autoscaler is - enabled only if a valid configuration is present. - management: - NodeManagement configuration for this NodePool. - max_pods_constraint: - The constraint on the maximum number of pods that can be run - simultaneously on a node in the node pool. - conditions: - Which conditions caused the current node pool state. - pod_ipv4_cidr_size: - [Output only] The pod CIDR block size per node in this node - pool. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.NodePool) - }, -) -_sym_db.RegisterMessage(NodePool) - -NodeManagement = _reflection.GeneratedProtocolMessageType( - "NodeManagement", - (_message.Message,), - { - "DESCRIPTOR": _NODEMANAGEMENT, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """NodeManagement defines the set of node management services turned on - for the node pool. - - Attributes: - auto_upgrade: - A flag that specifies whether node auto-upgrade is enabled for - the node pool. If enabled, node auto-upgrade helps keep the - nodes in your node pool up to date with the latest release - version of Kubernetes. - auto_repair: - A flag that specifies whether the node auto-repair is enabled - for the node pool. If enabled, the nodes in this node pool - will be monitored and, if they fail health checks too many - times, an automatic repair action will be triggered. - upgrade_options: - Specifies the Auto Upgrade knobs for the node pool. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.NodeManagement) - }, -) -_sym_db.RegisterMessage(NodeManagement) - -AutoUpgradeOptions = _reflection.GeneratedProtocolMessageType( - "AutoUpgradeOptions", - (_message.Message,), - { - "DESCRIPTOR": _AUTOUPGRADEOPTIONS, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """AutoUpgradeOptions defines the set of options for the user to control - how the Auto Upgrades will proceed. - - Attributes: - auto_upgrade_start_time: - [Output only] This field is set when upgrades are about to - commence with the approximate start time for the upgrades, in - `RFC3339 `__ text - format. - description: - [Output only] This field is set when upgrades are about to - commence with the description of the upgrade. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.AutoUpgradeOptions) - }, -) -_sym_db.RegisterMessage(AutoUpgradeOptions) - -MaintenancePolicy = _reflection.GeneratedProtocolMessageType( - "MaintenancePolicy", - (_message.Message,), - { - "DESCRIPTOR": _MAINTENANCEPOLICY, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """MaintenancePolicy defines the maintenance policy to be used for the - cluster. - - Attributes: - window: - Specifies the maintenance window in which maintenance may be - performed. - resource_version: - A hash identifying the version of this policy, so that updates - to fields of the policy won’t accidentally undo intermediate - changes (and so that users of the API unaware of some fields - won’t accidentally remove other fields). Make a get() request - to the cluster to get the current resource version and include - it with requests to set the policy. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.MaintenancePolicy) - }, -) -_sym_db.RegisterMessage(MaintenancePolicy) - -MaintenanceWindow = _reflection.GeneratedProtocolMessageType( - "MaintenanceWindow", - (_message.Message,), - { - "MaintenanceExclusionsEntry": _reflection.GeneratedProtocolMessageType( - "MaintenanceExclusionsEntry", - (_message.Message,), - { - "DESCRIPTOR": _MAINTENANCEWINDOW_MAINTENANCEEXCLUSIONSENTRY, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2" - # @@protoc_insertion_point(class_scope:google.container.v1.MaintenanceWindow.MaintenanceExclusionsEntry) - }, - ), - "DESCRIPTOR": _MAINTENANCEWINDOW, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """MaintenanceWindow defines the maintenance window to be used for the - cluster. - - Attributes: - daily_maintenance_window: - DailyMaintenanceWindow specifies a daily maintenance operation - window. 
- recurring_window: - RecurringWindow specifies some number of recurring time - periods for maintenance to occur. The time windows may be - overlapping. If no maintenance windows are set, maintenance - can occur at any time. - maintenance_exclusions: - Exceptions to maintenance window. Non-emergency maintenance - should not occur in these windows. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.MaintenanceWindow) - }, -) -_sym_db.RegisterMessage(MaintenanceWindow) -_sym_db.RegisterMessage(MaintenanceWindow.MaintenanceExclusionsEntry) - -TimeWindow = _reflection.GeneratedProtocolMessageType( - "TimeWindow", - (_message.Message,), - { - "DESCRIPTOR": _TIMEWINDOW, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Represents an arbitrary window of time. - - Attributes: - start_time: - The time that the window first starts. - end_time: - The time that the window ends. The end time should take place - after the start time. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.TimeWindow) - }, -) -_sym_db.RegisterMessage(TimeWindow) - -RecurringTimeWindow = _reflection.GeneratedProtocolMessageType( - "RecurringTimeWindow", - (_message.Message,), - { - "DESCRIPTOR": _RECURRINGTIMEWINDOW, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Represents an arbitrary window of time that recurs. - - Attributes: - window: - The window of the first recurrence. - recurrence: - An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) - for how this window reccurs. They go on for the span of time - between the start and end time. For example, to have - something repeat every weekday, you’d use: - FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR To repeat some window daily - (equivalent to the DailyMaintenanceWindow): FREQ=DAILY For the - first weekend of every month: - FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU This specifies how - frequently the window starts. 
Eg, if you wanted to have a 9-5 - UTC-4 window every weekday, you’d use something like: start - time = 2019-01-01T09:00:00-0400 end time = - 2019-01-01T17:00:00-0400 recurrence = - FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR Windows can span multiple - days. Eg, to make the window encompass every weekend from - midnight Saturday till the last minute of Sunday UTC: start - time = 2019-01-05T00:00:00Z end time = 2019-01-07T23:59:00Z - recurrence = FREQ=WEEKLY;BYDAY=SA Note the start and end - time’s specific dates are largely arbitrary except to specify - duration of the window and when it first starts. The FREQ - values of HOURLY, MINUTELY, and SECONDLY are not supported. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.RecurringTimeWindow) - }, -) -_sym_db.RegisterMessage(RecurringTimeWindow) - -DailyMaintenanceWindow = _reflection.GeneratedProtocolMessageType( - "DailyMaintenanceWindow", - (_message.Message,), - { - "DESCRIPTOR": _DAILYMAINTENANCEWINDOW, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Time window specified for daily maintenance operations. - - Attributes: - start_time: - Time within the maintenance window to start the maintenance - operations. Time format should be in `RFC3339 - `__ format “HH:MM”, - where HH : [00-23] and MM : [00-59] GMT. - duration: - [Output only] Duration of the time window, automatically - chosen to be smallest possible in the given scenario. Duration - will be in `RFC3339 `__ - format “PTnHnMnS”. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.DailyMaintenanceWindow) - }, -) -_sym_db.RegisterMessage(DailyMaintenanceWindow) - -SetNodePoolManagementRequest = _reflection.GeneratedProtocolMessageType( - "SetNodePoolManagementRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETNODEPOOLMANAGEMENTREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """SetNodePoolManagementRequest sets the node management properties of a - node pool. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to update. This field has - been deprecated and replaced by the name field. - node_pool_id: - Deprecated. The name of the node pool to update. This field - has been deprecated and replaced by the name field. - management: - Required. NodeManagement configuration for the node pool. - name: - The name (project, location, cluster, node pool id) of the - node pool to set management properties. Specified in the - format ``projects/*/locations/*/clusters/*/nodePools/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.SetNodePoolManagementRequest) - }, -) -_sym_db.RegisterMessage(SetNodePoolManagementRequest) - -SetNodePoolSizeRequest = _reflection.GeneratedProtocolMessageType( - "SetNodePoolSizeRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETNODEPOOLSIZEREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """SetNodePoolSizeRequest sets the size a node pool. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. 
This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to update. This field has - been deprecated and replaced by the name field. - node_pool_id: - Deprecated. The name of the node pool to update. This field - has been deprecated and replaced by the name field. - node_count: - Required. The desired node count for the pool. - name: - The name (project, location, cluster, node pool id) of the - node pool to set size. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.SetNodePoolSizeRequest) - }, -) -_sym_db.RegisterMessage(SetNodePoolSizeRequest) - -RollbackNodePoolUpgradeRequest = _reflection.GeneratedProtocolMessageType( - "RollbackNodePoolUpgradeRequest", - (_message.Message,), - { - "DESCRIPTOR": _ROLLBACKNODEPOOLUPGRADEREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or - Failed NodePool upgrade. This will be an no-op if the last upgrade - successfully completed. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to rollback. This field - has been deprecated and replaced by the name field. - node_pool_id: - Deprecated. The name of the node pool to rollback. This field - has been deprecated and replaced by the name field. 
- name: - The name (project, location, cluster, node pool id) of the - node poll to rollback upgrade. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.RollbackNodePoolUpgradeRequest) - }, -) -_sym_db.RegisterMessage(RollbackNodePoolUpgradeRequest) - -ListNodePoolsResponse = _reflection.GeneratedProtocolMessageType( - "ListNodePoolsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTNODEPOOLSRESPONSE, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """ListNodePoolsResponse is the result of ListNodePoolsRequest. - - Attributes: - node_pools: - A list of node pools for a cluster. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ListNodePoolsResponse) - }, -) -_sym_db.RegisterMessage(ListNodePoolsResponse) - -ClusterAutoscaling = _reflection.GeneratedProtocolMessageType( - "ClusterAutoscaling", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERAUTOSCALING, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """ClusterAutoscaling contains global, per-cluster information required - by Cluster Autoscaler to automatically adjust the size of the cluster - and create/delete node pools based on the current needs. - - Attributes: - enable_node_autoprovisioning: - Enables automatic node pool creation and deletion. - resource_limits: - Contains global constraints regarding minimum and maximum - amount of resources in the cluster. - autoprovisioning_node_pool_defaults: - AutoprovisioningNodePoolDefaults contains defaults for a node - pool created by NAP. - autoprovisioning_locations: - The list of Google Compute Engine `zones - `__ in - which the NodePool’s nodes can be created by NAP. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.ClusterAutoscaling) - }, -) -_sym_db.RegisterMessage(ClusterAutoscaling) - -AutoprovisioningNodePoolDefaults = _reflection.GeneratedProtocolMessageType( - "AutoprovisioningNodePoolDefaults", - (_message.Message,), - { - "DESCRIPTOR": _AUTOPROVISIONINGNODEPOOLDEFAULTS, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """AutoprovisioningNodePoolDefaults contains defaults for a node pool - created by NAP. - - Attributes: - oauth_scopes: - Scopes that are used by NAP when creating node pools. If - oauth_scopes are specified, service_account should be empty. - service_account: - The Google Cloud Platform Service Account to be used by the - node VMs. If service_account is specified, scopes should be - empty. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.AutoprovisioningNodePoolDefaults) - }, -) -_sym_db.RegisterMessage(AutoprovisioningNodePoolDefaults) - -ResourceLimit = _reflection.GeneratedProtocolMessageType( - "ResourceLimit", - (_message.Message,), - { - "DESCRIPTOR": _RESOURCELIMIT, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Contains information about amount of some resource in the cluster. For - memory, value should be in GB. - - Attributes: - resource_type: - Resource name “cpu”, “memory” or gpu-specific string. - minimum: - Minimum amount of the resource in the cluster. - maximum: - Maximum amount of the resource in the cluster. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.ResourceLimit) - }, -) -_sym_db.RegisterMessage(ResourceLimit) - -NodePoolAutoscaling = _reflection.GeneratedProtocolMessageType( - "NodePoolAutoscaling", - (_message.Message,), - { - "DESCRIPTOR": _NODEPOOLAUTOSCALING, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """NodePoolAutoscaling contains information required by cluster - autoscaler to adjust the size of the node pool to the current cluster - usage. - - Attributes: - enabled: - Is autoscaling enabled for this node pool. - min_node_count: - Minimum number of nodes in the NodePool. Must be >= 1 and <= - max_node_count. - max_node_count: - Maximum number of nodes in the NodePool. Must be >= - min_node_count. There has to enough quota to scale up the - cluster. - autoprovisioned: - Can this node pool be deleted automatically. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.NodePoolAutoscaling) - }, -) -_sym_db.RegisterMessage(NodePoolAutoscaling) - -SetLabelsRequest = _reflection.GeneratedProtocolMessageType( - "SetLabelsRequest", - (_message.Message,), - { - "ResourceLabelsEntry": _reflection.GeneratedProtocolMessageType( - "ResourceLabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _SETLABELSREQUEST_RESOURCELABELSENTRY, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2" - # @@protoc_insertion_point(class_scope:google.container.v1.SetLabelsRequest.ResourceLabelsEntry) - }, - ), - "DESCRIPTOR": _SETLABELSREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """SetLabelsRequest sets the Google Cloud Platform labels on a Google - Container Engine cluster, which will in turn set them for Google - Compute Engine resources used by that cluster - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number `__. This field has been deprecated and - replaced by the name field. 
- zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster. This field has been - deprecated and replaced by the name field. - resource_labels: - Required. The labels to set for that cluster. - label_fingerprint: - Required. The fingerprint of the previous set of labels for - this resource, used to detect conflicts. The fingerprint is - initially generated by Kubernetes Engine and changes after - every request to modify or update labels. You must always - provide an up-to-date fingerprint hash when updating or - changing labels. Make a get() request to the resource to get - the latest fingerprint. - name: - The name (project, location, cluster id) of the cluster to set - labels. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.SetLabelsRequest) - }, -) -_sym_db.RegisterMessage(SetLabelsRequest) -_sym_db.RegisterMessage(SetLabelsRequest.ResourceLabelsEntry) - -SetLegacyAbacRequest = _reflection.GeneratedProtocolMessageType( - "SetLegacyAbacRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETLEGACYABACREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """SetLegacyAbacRequest enables or disables the ABAC authorization - mechanism for a cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster to update. This field has - been deprecated and replaced by the name field. - enabled: - Required. 
Whether ABAC authorization will be enabled in the - cluster. - name: - The name (project, location, cluster id) of the cluster to set - legacy abac. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.SetLegacyAbacRequest) - }, -) -_sym_db.RegisterMessage(SetLegacyAbacRequest) - -StartIPRotationRequest = _reflection.GeneratedProtocolMessageType( - "StartIPRotationRequest", - (_message.Message,), - { - "DESCRIPTOR": _STARTIPROTATIONREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """StartIPRotationRequest creates a new IP for the cluster and then - performs a node upgrade on each node pool to point to the new IP. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number `__. This field has been deprecated and - replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster. This field has been - deprecated and replaced by the name field. - name: - The name (project, location, cluster id) of the cluster to - start IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - rotate_credentials: - Whether to rotate credentials during IP rotation. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.StartIPRotationRequest) - }, -) -_sym_db.RegisterMessage(StartIPRotationRequest) - -CompleteIPRotationRequest = _reflection.GeneratedProtocolMessageType( - "CompleteIPRotationRequest", - (_message.Message,), - { - "DESCRIPTOR": _COMPLETEIPROTATIONREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """CompleteIPRotationRequest moves the cluster master back into single-IP - mode. - - Attributes: - project_id: - Deprecated. 
The Google Developers Console `project ID or - project number `__. This field has been deprecated and - replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster. This field has been - deprecated and replaced by the name field. - name: - The name (project, location, cluster id) of the cluster to - complete IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.CompleteIPRotationRequest) - }, -) -_sym_db.RegisterMessage(CompleteIPRotationRequest) - -AcceleratorConfig = _reflection.GeneratedProtocolMessageType( - "AcceleratorConfig", - (_message.Message,), - { - "DESCRIPTOR": _ACCELERATORCONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """AcceleratorConfig represents a Hardware Accelerator request. - - Attributes: - accelerator_count: - The number of the accelerator cards exposed to an instance. - accelerator_type: - The accelerator type resource name. List of supported - accelerators `here - `__ - """, - # @@protoc_insertion_point(class_scope:google.container.v1.AcceleratorConfig) - }, -) -_sym_db.RegisterMessage(AcceleratorConfig) - -SetNetworkPolicyRequest = _reflection.GeneratedProtocolMessageType( - "SetNetworkPolicyRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETNETWORKPOLICYREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """SetNetworkPolicyRequest enables/disables network policy for a cluster. - - Attributes: - project_id: - Deprecated. The Google Developers Console `project ID or - project number `__. This field has been deprecated and - replaced by the name field. - zone: - Deprecated. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. 
This field has been deprecated and - replaced by the name field. - cluster_id: - Deprecated. The name of the cluster. This field has been - deprecated and replaced by the name field. - network_policy: - Required. Configuration options for the NetworkPolicy feature. - name: - The name (project, location, cluster id) of the cluster to set - networking policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.SetNetworkPolicyRequest) - }, -) -_sym_db.RegisterMessage(SetNetworkPolicyRequest) - -SetMaintenancePolicyRequest = _reflection.GeneratedProtocolMessageType( - "SetMaintenancePolicyRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETMAINTENANCEPOLICYREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """SetMaintenancePolicyRequest sets the maintenance policy for a cluster. - - Attributes: - project_id: - Required. The Google Developers Console `project ID or project - number `__. - zone: - Required. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. - cluster_id: - Required. The name of the cluster to update. - maintenance_policy: - Required. The maintenance policy to be set for the cluster. An - empty field clears the existing maintenance policy. - name: - The name (project, location, cluster id) of the cluster to set - maintenance policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.SetMaintenancePolicyRequest) - }, -) -_sym_db.RegisterMessage(SetMaintenancePolicyRequest) - -StatusCondition = _reflection.GeneratedProtocolMessageType( - "StatusCondition", - (_message.Message,), - { - "DESCRIPTOR": _STATUSCONDITION, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """StatusCondition describes why a cluster or a node pool has a certain - status (e.g., ERROR or DEGRADED). 
- - Attributes: - code: - Machine-friendly representation of the condition - message: - Human-friendly representation of the condition - """, - # @@protoc_insertion_point(class_scope:google.container.v1.StatusCondition) - }, -) -_sym_db.RegisterMessage(StatusCondition) - -NetworkConfig = _reflection.GeneratedProtocolMessageType( - "NetworkConfig", - (_message.Message,), - { - "DESCRIPTOR": _NETWORKCONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """NetworkConfig reports the relative names of network & subnetwork. - - Attributes: - network: - Output only. The relative name of the Google Compute Engine [n - etwork][google.container.v1.NetworkConfig.network](https://clo - ud.google.com/compute/docs/networks-and-firewalls#networks) to - which the cluster is connected. Example: projects/my- - project/global/networks/my-network - subnetwork: - Output only. The relative name of the Google Compute Engine - `subnetwork `__ to - which the cluster is connected. Example: projects/my- - project/regions/us-central1/subnetworks/my-subnet - enable_intra_node_visibility: - Whether Intra-node visibility is enabled for this cluster. - This makes same node pod to pod traffic visible for VPC - network. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.NetworkConfig) - }, -) -_sym_db.RegisterMessage(NetworkConfig) - -IntraNodeVisibilityConfig = _reflection.GeneratedProtocolMessageType( - "IntraNodeVisibilityConfig", - (_message.Message,), - { - "DESCRIPTOR": _INTRANODEVISIBILITYCONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """IntraNodeVisibilityConfig contains the desired config of the intra- - node visibility on this cluster. - - Attributes: - enabled: - Enables intra node visibility for this cluster. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.IntraNodeVisibilityConfig) - }, -) -_sym_db.RegisterMessage(IntraNodeVisibilityConfig) - -MaxPodsConstraint = _reflection.GeneratedProtocolMessageType( - "MaxPodsConstraint", - (_message.Message,), - { - "DESCRIPTOR": _MAXPODSCONSTRAINT, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Constraints applied to pods. - - Attributes: - max_pods_per_node: - Constraint enforced on the max num of pods per node. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.MaxPodsConstraint) - }, -) -_sym_db.RegisterMessage(MaxPodsConstraint) - -DatabaseEncryption = _reflection.GeneratedProtocolMessageType( - "DatabaseEncryption", - (_message.Message,), - { - "DESCRIPTOR": _DATABASEENCRYPTION, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration of etcd encryption. - - Attributes: - state: - Denotes the state of etcd encryption. - key_name: - Name of CloudKMS key to use for the encryption of secrets in - etcd. Ex. projects/my-project/locations/global/keyRings/my- - ring/cryptoKeys/my-key - """, - # @@protoc_insertion_point(class_scope:google.container.v1.DatabaseEncryption) - }, -) -_sym_db.RegisterMessage(DatabaseEncryption) - -ListUsableSubnetworksRequest = _reflection.GeneratedProtocolMessageType( - "ListUsableSubnetworksRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTUSABLESUBNETWORKSREQUEST, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """ListUsableSubnetworksRequest requests the list of usable subnetworks - available to a user for creating clusters. - - Attributes: - parent: - The parent project where subnetworks are usable. Specified in - the format ``projects/*``. 
- filter: - Filtering currently only supports equality on the - networkProjectId and must be in the form: - “networkProjectId=[PROJECTID]”, where ``networkProjectId`` is - the project which owns the listed subnetworks. This defaults - to the parent project ID. - page_size: - The max number of results per page that should be returned. If - the number of available results is larger than ``page_size``, - a ``next_page_token`` is returned which can be used to get the - next page of results in subsequent requests. Acceptable values - are 0 to 500, inclusive. (Default: 500) - page_token: - Specifies a page token to use. Set this to the nextPageToken - returned by previous list requests to get the next page of - results. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ListUsableSubnetworksRequest) - }, -) -_sym_db.RegisterMessage(ListUsableSubnetworksRequest) - -ListUsableSubnetworksResponse = _reflection.GeneratedProtocolMessageType( - "ListUsableSubnetworksResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTUSABLESUBNETWORKSRESPONSE, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """ListUsableSubnetworksResponse is the response of - ListUsableSubnetworksRequest. - - Attributes: - subnetworks: - A list of usable subnetworks in the specified network project. - next_page_token: - This token allows you to get the next page of results for list - requests. If the number of results is larger than - ``page_size``, use the ``next_page_token`` as a value for the - query parameter ``page_token`` in the next request. The value - will become empty when there are no more pages. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.ListUsableSubnetworksResponse) - }, -) -_sym_db.RegisterMessage(ListUsableSubnetworksResponse) - -UsableSubnetworkSecondaryRange = _reflection.GeneratedProtocolMessageType( - "UsableSubnetworkSecondaryRange", - (_message.Message,), - { - "DESCRIPTOR": _USABLESUBNETWORKSECONDARYRANGE, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Secondary IP range of a usable subnetwork. - - Attributes: - range_name: - The name associated with this subnetwork secondary range, used - when adding an alias IP range to a VM instance. - ip_cidr_range: - The range of IP addresses belonging to this subnetwork - secondary range. - status: - This field is to determine the status of the secondary range - programmably. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.UsableSubnetworkSecondaryRange) - }, -) -_sym_db.RegisterMessage(UsableSubnetworkSecondaryRange) - -UsableSubnetwork = _reflection.GeneratedProtocolMessageType( - "UsableSubnetwork", - (_message.Message,), - { - "DESCRIPTOR": _USABLESUBNETWORK, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Network Name. Example: projects/my-project/global/networks/my-network - - Attributes: - subnetwork: - Subnetwork Name. Example: projects/my-project/regions/us- - central1/subnetworks/my-subnet - ip_cidr_range: - The range of internal addresses that are owned by this - subnetwork. - secondary_ip_ranges: - Secondary IP ranges. - status_message: - A human readable status message representing the reasons for - cases where the caller cannot use the secondary ranges under - the subnet. For example if the secondary_ip_ranges is empty - due to a permission issue, an insufficient permission message - will be given by status_message. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1.UsableSubnetwork) - }, -) -_sym_db.RegisterMessage(UsableSubnetwork) - -ResourceUsageExportConfig = _reflection.GeneratedProtocolMessageType( - "ResourceUsageExportConfig", - (_message.Message,), - { - "BigQueryDestination": _reflection.GeneratedProtocolMessageType( - "BigQueryDestination", - (_message.Message,), - { - "DESCRIPTOR": _RESOURCEUSAGEEXPORTCONFIG_BIGQUERYDESTINATION, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Parameters for using BigQuery as the destination of resource usage - export. - - Attributes: - dataset_id: - The ID of a BigQuery Dataset. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ResourceUsageExportConfig.BigQueryDestination) - }, - ), - "ConsumptionMeteringConfig": _reflection.GeneratedProtocolMessageType( - "ConsumptionMeteringConfig", - (_message.Message,), - { - "DESCRIPTOR": _RESOURCEUSAGEEXPORTCONFIG_CONSUMPTIONMETERINGCONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Parameters for controlling consumption metering. - - Attributes: - enabled: - Whether to enable consumption metering for this cluster. If - enabled, a second BigQuery table will be created to hold - resource consumption records. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ResourceUsageExportConfig.ConsumptionMeteringConfig) - }, - ), - "DESCRIPTOR": _RESOURCEUSAGEEXPORTCONFIG, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """Configuration for exporting cluster resource usages. - - Attributes: - bigquery_destination: - Configuration to use BigQuery as usage export destination. - enable_network_egress_metering: - Whether to enable network egress metering for this cluster. If - enabled, a daemonset will be created in the cluster to meter - network egress traffic. 
- consumption_metering_config: - Configuration to enable resource consumption metering. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.ResourceUsageExportConfig) - }, -) -_sym_db.RegisterMessage(ResourceUsageExportConfig) -_sym_db.RegisterMessage(ResourceUsageExportConfig.BigQueryDestination) -_sym_db.RegisterMessage(ResourceUsageExportConfig.ConsumptionMeteringConfig) - -VerticalPodAutoscaling = _reflection.GeneratedProtocolMessageType( - "VerticalPodAutoscaling", - (_message.Message,), - { - "DESCRIPTOR": _VERTICALPODAUTOSCALING, - "__module__": "google.cloud.container_v1.proto.cluster_service_pb2", - "__doc__": """VerticalPodAutoscaling contains global, per-cluster information - required by Vertical Pod Autoscaler to automatically adjust the - resources of pods controlled by it. - - Attributes: - enabled: - Enables vertical pod autoscaling. - """, - # @@protoc_insertion_point(class_scope:google.container.v1.VerticalPodAutoscaling) - }, -) -_sym_db.RegisterMessage(VerticalPodAutoscaling) - - -DESCRIPTOR._options = None -_NODECONFIG_METADATAENTRY._options = None -_NODECONFIG_LABELSENTRY._options = None -_ADDONSCONFIG.fields_by_name["kubernetes_dashboard"]._options = None -_IPALLOCATIONPOLICY.fields_by_name["cluster_ipv4_cidr"]._options = None -_IPALLOCATIONPOLICY.fields_by_name["node_ipv4_cidr"]._options = None -_IPALLOCATIONPOLICY.fields_by_name["services_ipv4_cidr"]._options = None -_CLUSTER_RESOURCELABELSENTRY._options = None -_CLUSTER.fields_by_name["initial_node_count"]._options = None -_CLUSTER.fields_by_name["node_config"]._options = None -_CLUSTER.fields_by_name["zone"]._options = None -_CLUSTER.fields_by_name["current_node_version"]._options = None -_CLUSTER.fields_by_name["instance_group_urls"]._options = None -_CLUSTER.fields_by_name["current_node_count"]._options = None -_OPERATION.fields_by_name["zone"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["project_id"]._options = None 
-_CREATECLUSTERREQUEST.fields_by_name["zone"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["cluster"]._options = None -_GETCLUSTERREQUEST.fields_by_name["project_id"]._options = None -_GETCLUSTERREQUEST.fields_by_name["zone"]._options = None -_GETCLUSTERREQUEST.fields_by_name["cluster_id"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["project_id"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["zone"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["cluster_id"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["update"]._options = None -_UPDATENODEPOOLREQUEST.fields_by_name["project_id"]._options = None -_UPDATENODEPOOLREQUEST.fields_by_name["zone"]._options = None -_UPDATENODEPOOLREQUEST.fields_by_name["cluster_id"]._options = None -_UPDATENODEPOOLREQUEST.fields_by_name["node_pool_id"]._options = None -_UPDATENODEPOOLREQUEST.fields_by_name["node_version"]._options = None -_UPDATENODEPOOLREQUEST.fields_by_name["image_type"]._options = None -_SETNODEPOOLAUTOSCALINGREQUEST.fields_by_name["project_id"]._options = None -_SETNODEPOOLAUTOSCALINGREQUEST.fields_by_name["zone"]._options = None -_SETNODEPOOLAUTOSCALINGREQUEST.fields_by_name["cluster_id"]._options = None -_SETNODEPOOLAUTOSCALINGREQUEST.fields_by_name["node_pool_id"]._options = None -_SETNODEPOOLAUTOSCALINGREQUEST.fields_by_name["autoscaling"]._options = None -_SETLOGGINGSERVICEREQUEST.fields_by_name["project_id"]._options = None -_SETLOGGINGSERVICEREQUEST.fields_by_name["zone"]._options = None -_SETLOGGINGSERVICEREQUEST.fields_by_name["cluster_id"]._options = None -_SETLOGGINGSERVICEREQUEST.fields_by_name["logging_service"]._options = None -_SETMONITORINGSERVICEREQUEST.fields_by_name["project_id"]._options = None -_SETMONITORINGSERVICEREQUEST.fields_by_name["zone"]._options = None -_SETMONITORINGSERVICEREQUEST.fields_by_name["cluster_id"]._options = None -_SETMONITORINGSERVICEREQUEST.fields_by_name["monitoring_service"]._options = None 
-_SETADDONSCONFIGREQUEST.fields_by_name["project_id"]._options = None -_SETADDONSCONFIGREQUEST.fields_by_name["zone"]._options = None -_SETADDONSCONFIGREQUEST.fields_by_name["cluster_id"]._options = None -_SETADDONSCONFIGREQUEST.fields_by_name["addons_config"]._options = None -_SETLOCATIONSREQUEST.fields_by_name["project_id"]._options = None -_SETLOCATIONSREQUEST.fields_by_name["zone"]._options = None -_SETLOCATIONSREQUEST.fields_by_name["cluster_id"]._options = None -_SETLOCATIONSREQUEST.fields_by_name["locations"]._options = None -_UPDATEMASTERREQUEST.fields_by_name["project_id"]._options = None -_UPDATEMASTERREQUEST.fields_by_name["zone"]._options = None -_UPDATEMASTERREQUEST.fields_by_name["cluster_id"]._options = None -_UPDATEMASTERREQUEST.fields_by_name["master_version"]._options = None -_SETMASTERAUTHREQUEST.fields_by_name["project_id"]._options = None -_SETMASTERAUTHREQUEST.fields_by_name["zone"]._options = None -_SETMASTERAUTHREQUEST.fields_by_name["cluster_id"]._options = None -_SETMASTERAUTHREQUEST.fields_by_name["action"]._options = None -_SETMASTERAUTHREQUEST.fields_by_name["update"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["project_id"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["zone"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["cluster_id"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["project_id"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["zone"]._options = None -_GETOPERATIONREQUEST.fields_by_name["project_id"]._options = None -_GETOPERATIONREQUEST.fields_by_name["zone"]._options = None -_GETOPERATIONREQUEST.fields_by_name["operation_id"]._options = None -_LISTOPERATIONSREQUEST.fields_by_name["project_id"]._options = None -_LISTOPERATIONSREQUEST.fields_by_name["zone"]._options = None -_CANCELOPERATIONREQUEST.fields_by_name["project_id"]._options = None -_CANCELOPERATIONREQUEST.fields_by_name["zone"]._options = None -_CANCELOPERATIONREQUEST.fields_by_name["operation_id"]._options = None 
-_GETSERVERCONFIGREQUEST.fields_by_name["project_id"]._options = None -_GETSERVERCONFIGREQUEST.fields_by_name["zone"]._options = None -_CREATENODEPOOLREQUEST.fields_by_name["project_id"]._options = None -_CREATENODEPOOLREQUEST.fields_by_name["zone"]._options = None -_CREATENODEPOOLREQUEST.fields_by_name["cluster_id"]._options = None -_CREATENODEPOOLREQUEST.fields_by_name["node_pool"]._options = None -_DELETENODEPOOLREQUEST.fields_by_name["project_id"]._options = None -_DELETENODEPOOLREQUEST.fields_by_name["zone"]._options = None -_DELETENODEPOOLREQUEST.fields_by_name["cluster_id"]._options = None -_DELETENODEPOOLREQUEST.fields_by_name["node_pool_id"]._options = None -_LISTNODEPOOLSREQUEST.fields_by_name["project_id"]._options = None -_LISTNODEPOOLSREQUEST.fields_by_name["zone"]._options = None -_LISTNODEPOOLSREQUEST.fields_by_name["cluster_id"]._options = None -_GETNODEPOOLREQUEST.fields_by_name["project_id"]._options = None -_GETNODEPOOLREQUEST.fields_by_name["zone"]._options = None -_GETNODEPOOLREQUEST.fields_by_name["cluster_id"]._options = None -_GETNODEPOOLREQUEST.fields_by_name["node_pool_id"]._options = None -_MAINTENANCEWINDOW_MAINTENANCEEXCLUSIONSENTRY._options = None -_SETNODEPOOLMANAGEMENTREQUEST.fields_by_name["project_id"]._options = None -_SETNODEPOOLMANAGEMENTREQUEST.fields_by_name["zone"]._options = None -_SETNODEPOOLMANAGEMENTREQUEST.fields_by_name["cluster_id"]._options = None -_SETNODEPOOLMANAGEMENTREQUEST.fields_by_name["node_pool_id"]._options = None -_SETNODEPOOLMANAGEMENTREQUEST.fields_by_name["management"]._options = None -_SETNODEPOOLSIZEREQUEST.fields_by_name["project_id"]._options = None -_SETNODEPOOLSIZEREQUEST.fields_by_name["zone"]._options = None -_SETNODEPOOLSIZEREQUEST.fields_by_name["cluster_id"]._options = None -_SETNODEPOOLSIZEREQUEST.fields_by_name["node_pool_id"]._options = None -_SETNODEPOOLSIZEREQUEST.fields_by_name["node_count"]._options = None -_ROLLBACKNODEPOOLUPGRADEREQUEST.fields_by_name["project_id"]._options = None 
-_ROLLBACKNODEPOOLUPGRADEREQUEST.fields_by_name["zone"]._options = None -_ROLLBACKNODEPOOLUPGRADEREQUEST.fields_by_name["cluster_id"]._options = None -_ROLLBACKNODEPOOLUPGRADEREQUEST.fields_by_name["node_pool_id"]._options = None -_SETLABELSREQUEST_RESOURCELABELSENTRY._options = None -_SETLABELSREQUEST.fields_by_name["project_id"]._options = None -_SETLABELSREQUEST.fields_by_name["zone"]._options = None -_SETLABELSREQUEST.fields_by_name["cluster_id"]._options = None -_SETLABELSREQUEST.fields_by_name["resource_labels"]._options = None -_SETLABELSREQUEST.fields_by_name["label_fingerprint"]._options = None -_SETLEGACYABACREQUEST.fields_by_name["project_id"]._options = None -_SETLEGACYABACREQUEST.fields_by_name["zone"]._options = None -_SETLEGACYABACREQUEST.fields_by_name["cluster_id"]._options = None -_SETLEGACYABACREQUEST.fields_by_name["enabled"]._options = None -_STARTIPROTATIONREQUEST.fields_by_name["project_id"]._options = None -_STARTIPROTATIONREQUEST.fields_by_name["zone"]._options = None -_STARTIPROTATIONREQUEST.fields_by_name["cluster_id"]._options = None -_COMPLETEIPROTATIONREQUEST.fields_by_name["project_id"]._options = None -_COMPLETEIPROTATIONREQUEST.fields_by_name["zone"]._options = None -_COMPLETEIPROTATIONREQUEST.fields_by_name["cluster_id"]._options = None -_SETNETWORKPOLICYREQUEST.fields_by_name["project_id"]._options = None -_SETNETWORKPOLICYREQUEST.fields_by_name["zone"]._options = None -_SETNETWORKPOLICYREQUEST.fields_by_name["cluster_id"]._options = None -_SETNETWORKPOLICYREQUEST.fields_by_name["network_policy"]._options = None -_SETMAINTENANCEPOLICYREQUEST.fields_by_name["project_id"]._options = None -_SETMAINTENANCEPOLICYREQUEST.fields_by_name["zone"]._options = None -_SETMAINTENANCEPOLICYREQUEST.fields_by_name["cluster_id"]._options = None -_SETMAINTENANCEPOLICYREQUEST.fields_by_name["maintenance_policy"]._options = None - -_CLUSTERMANAGER = _descriptor.ServiceDescriptor( - name="ClusterManager", - 
full_name="google.container.v1.ClusterManager", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\030container.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - create_key=_descriptor._internal_create_key, - serialized_start=15940, - serialized_end=24726, - methods=[ - _descriptor.MethodDescriptor( - name="ListClusters", - full_name="google.container.v1.ClusterManager.ListClusters", - index=0, - containing_service=None, - input_type=_LISTCLUSTERSREQUEST, - output_type=_LISTCLUSTERSRESPONSE, - serialized_options=b"\202\323\344\223\002a\022,/v1/{parent=projects/*/locations/*}/clustersZ1\022//v1/projects/{project_id}/zones/{zone}/clusters\332A\017project_id,zone\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetCluster", - full_name="google.container.v1.ClusterManager.GetCluster", - index=1, - containing_service=None, - input_type=_GETCLUSTERREQUEST, - output_type=_CLUSTER, - serialized_options=b"\202\323\344\223\002n\022,/v1/{name=projects/*/locations/*/clusters/*}Z>\022*/v1/{name=projects/*/locations/*/clusters/*}:setResourceLabels:\001*ZP"K/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/resourceLabels:\001*', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SetLegacyAbac", - full_name="google.container.v1.ClusterManager.SetLegacyAbac", - index=24, - containing_service=None, - input_type=_SETLEGACYABACREQUEST, - output_type=_OPERATION, - serialized_options=b'\202\323\344\223\002\215\001":/v1/{name=projects/*/locations/*/clusters/*}:setLegacyAbac:\001*ZL"G/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/legacyAbac:\001*\332A"project_id,zone,cluster_id,enabled\332A\014name,enabled', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="StartIPRotation", - full_name="google.container.v1.ClusterManager.StartIPRotation", - index=25, - containing_service=None, - 
input_type=_STARTIPROTATIONREQUEST, - output_type=_OPERATION, - serialized_options=b'\202\323\344\223\002\224\001" None: + """Instantiate the cluster manager client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ClusterManagerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + + self._client = ClusterManagerClient( + credentials=credentials, transport=transport, client_options=client_options, + ) + + async def list_clusters( + self, + request: cluster_service.ListClustersRequest = None, + *, + project_id: str = None, + zone: str = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListClustersResponse: + r"""Lists all clusters owned by a project in either the + specified zone or all zones. + + Args: + request (:class:`~.cluster_service.ListClustersRequest`): + The request object. ListClustersRequest lists clusters. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides, or "-" for all zones. This + field has been deprecated and replaced by the parent + field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (:class:`str`): + The parent (project and location) where the clusters + will be listed. Specified in the format + ``projects/*/locations/*``. Location "-" matches all + zones and all regions. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.cluster_service.ListClustersResponse: + ListClustersResponse is the result of + ListClustersRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_clusters, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_cluster( + self, + request: cluster_service.GetClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Cluster: + r"""Gets the details of a specific cluster. + + Args: + request (:class:`~.cluster_service.GetClusterRequest`): + The request object. GetClusterRequest gets the settings + of a cluster. + project_id (:class:`str`): + Deprecated. 
The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to retrieve. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + retrieve. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Cluster: + A Google Kubernetes Engine cluster. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = cluster_service.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_cluster( + self, + request: cluster_service.CreateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster: cluster_service.Cluster = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. + + Args: + request (:class:`~.cluster_service.CreateClusterRequest`): + The request object. CreateClusterRequest creates a + cluster. 
+ project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`~.cluster_service.Cluster`): + Required. A `cluster + resource `__ + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (:class:`str`): + The parent (project and location) where the cluster will + be created. Specified in the format + ``projects/*/locations/*``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster, parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = cluster_service.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster is not None: + request.cluster = cluster + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_cluster( + self, + request: cluster_service.UpdateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + update: cluster_service.ClusterUpdate = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the settings of a specific cluster. + + Args: + request (:class:`~.cluster_service.UpdateClusterRequest`): + The request object. UpdateClusterRequest updates the + settings of a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. 
This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update (:class:`~.cluster_service.ClusterUpdate`): + Required. A description of the + update. + This corresponds to the ``update`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + update. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, update, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if update is not None: + request.update = update + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_node_pool( + self, + request: cluster_service.UpdateNodePoolRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the version and/or image type for the + specified node pool. + + Args: + request (:class:`~.cluster_service.UpdateNodePoolRequest`): + The request object. UpdateNodePoolRequests update a node + pool's image and/or version. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. 
+ + request = cluster_service.UpdateNodePoolRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_node_pool_autoscaling( + self, + request: cluster_service.SetNodePoolAutoscalingRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the autoscaling settings for the specified node + pool. + + Args: + request (:class:`~.cluster_service.SetNodePoolAutoscalingRequest`): + The request object. SetNodePoolAutoscalingRequest sets + the autoscaler settings of a node pool. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.SetNodePoolAutoscalingRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_node_pool_autoscaling, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_logging_service( + self, + request: cluster_service.SetLoggingServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + logging_service: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the logging service for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetLoggingServiceRequest`): + The request object. SetLoggingServiceRequest sets the + logging service of a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. 
+ This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logging_service (:class:`str`): + Required. The logging service the cluster should use to + write metrics. Currently available options: + + - "logging.googleapis.com" - the Google Cloud Logging + service + - "none" - no metrics will be exported from the cluster + This corresponds to the ``logging_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + set logging. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, logging_service, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetLoggingServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if logging_service is not None: + request.logging_service = logging_service + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_logging_service, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_monitoring_service( + self, + request: cluster_service.SetMonitoringServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + monitoring_service: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the monitoring service for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetMonitoringServiceRequest`): + The request object. SetMonitoringServiceRequest sets the + monitoring service of a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. 
This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + monitoring_service (:class:`str`): + Required. The monitoring service the cluster should use + to write metrics. Currently available options: + + - "monitoring.googleapis.com/kubernetes" - the Google + Cloud Monitoring service with Kubernetes-native + resource model + - "monitoring.googleapis.com" - the Google Cloud + Monitoring service + - "none" - no metrics will be exported from the cluster + This corresponds to the ``monitoring_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + set monitoring. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any( + [project_id, zone, cluster_id, monitoring_service, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetMonitoringServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if monitoring_service is not None: + request.monitoring_service = monitoring_service + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_monitoring_service, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_addons_config( + self, + request: cluster_service.SetAddonsConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + addons_config: cluster_service.AddonsConfig = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the addons for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetAddonsConfigRequest`): + The request object. SetAddonsConfigRequest sets the + addons associated with the cluster. + project_id (:class:`str`): + Deprecated. 
The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + addons_config (:class:`~.cluster_service.AddonsConfig`): + Required. The desired configurations + for the various addons available to run + in the cluster. + This corresponds to the ``addons_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + set addons. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, addons_config, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetAddonsConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if addons_config is not None: + request.addons_config = addons_config + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_addons_config, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_locations( + self, + request: cluster_service.SetLocationsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + locations: Sequence[str] = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the locations for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetLocationsRequest`): + The request object. SetLocationsRequest sets the + locations of the cluster. 
+ project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + locations (:class:`Sequence[str]`): + Required. The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. Changing + the locations a cluster is in will result in nodes being + either created or removed from the cluster, depending on + whether locations are being added or removed. + + This list must always include the cluster's primary + zone. + This corresponds to the ``locations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + set locations. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, locations, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetLocationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if locations is not None: + request.locations = locations + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_locations, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def update_master( + self, + request: cluster_service.UpdateMasterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + master_version: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the master for a specific cluster. + + Args: + request (:class:`~.cluster_service.UpdateMasterRequest`): + The request object. UpdateMasterRequest updates the + master of the cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + master_version (:class:`str`): + Required. The Kubernetes version to + change the master to. 
+ Users may specify either explicit + versions offered by Kubernetes Engine or + version aliases, which have the + following behavior: + - "latest": picks the highest valid + Kubernetes version - "1.X": picks the + highest valid patch+gke.N patch in the + 1.X version - "1.X.Y": picks the highest + valid gke.N patch in the 1.X.Y version - + "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the + default Kubernetes version + This corresponds to the ``master_version`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + update. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, master_version, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.UpdateMasterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if master_version is not None: + request.master_version = master_version + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_master, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_master_auth( + self, + request: cluster_service.SetMasterAuthRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets master auth materials. Currently supports + changing the admin password or a specific cluster, + either via password generation or explicitly setting the + password. + + Args: + request (:class:`~.cluster_service.SetMasterAuthRequest`): + The request object. SetMasterAuthRequest updates the + admin password of a cluster. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. 
+ + request = cluster_service.SetMasterAuthRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_master_auth, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_cluster( + self, + request: cluster_service.DeleteClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes the cluster, including the Kubernetes + endpoint and all worker nodes. + + Firewalls and routes that were configured during cluster + creation are also deleted. + + Other Google Compute Engine resources that might be in + use by the cluster, such as load balancer resources, are + not deleted if they weren't present when the cluster was + initially created. + + Args: + request (:class:`~.cluster_service.DeleteClusterRequest`): + The request object. DeleteClusterRequest deletes a + cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. 
This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to delete. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + delete. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_operations( + self, + request: cluster_service.ListOperationsRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListOperationsResponse: + r"""Lists all operations in a project in a specific zone + or all zones. + + Args: + request (:class:`~.cluster_service.ListOperationsRequest`): + The request object. ListOperationsRequest lists + operations. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for, or ``-`` for all zones. This + field has been deprecated and replaced by the parent + field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ListOperationsResponse: + ListOperationsResponse is the result + of ListOperationsRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListOperationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: cluster_service.GetOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Gets the specified operation. 
+ + Args: + request (:class:`~.cluster_service.GetOperationRequest`): + The request object. GetOperationRequest gets a single + operation. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (:class:`str`): + Deprecated. The server-assigned ``name`` of the + operation. This field has been deprecated and replaced + by the name field. + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, operation_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = cluster_service.GetOperationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def cancel_operation( + self, + request: cluster_service.CancelOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels the specified operation. + + Args: + request (:class:`~.cluster_service.CancelOperationRequest`): + The request object. CancelOperationRequest cancels a + single operation. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the operation resides. This field has been + deprecated and replaced by the name field. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (:class:`str`): + Deprecated. The server-assigned ``name`` of the + operation. This field has been deprecated and replaced + by the name field. + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, operation id) of the + operation to cancel. Specified in the format + ``projects/*/locations/*/operations/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, operation_id, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CancelOperationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def get_server_config( + self, + request: cluster_service.GetServerConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ServerConfig: + r"""Returns configuration info about the Google + Kubernetes Engine service. + + Args: + request (:class:`~.cluster_service.GetServerConfigRequest`): + The request object. Gets the current Kubernetes Engine + service configuration. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for. This field has been deprecated + and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project and location) of the server config to + get, specified in the format ``projects/*/locations/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ServerConfig: + Kubernetes Engine service + configuration. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.GetServerConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_server_config, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_node_pools( + self, + request: cluster_service.ListNodePoolsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListNodePoolsResponse: + r"""Lists the node pools for a cluster. 
+ + Args: + request (:class:`~.cluster_service.ListNodePoolsRequest`): + The request object. ListNodePoolsRequest lists the node + pool(s) for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the parent field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (:class:`str`): + The parent (project, location, cluster id) where the + node pools will be listed. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ListNodePoolsResponse: + ListNodePoolsResponse is the result + of ListNodePoolsRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([project_id, zone, cluster_id, parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListNodePoolsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_node_pools, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_node_pool( + self, + request: cluster_service.GetNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.NodePool: + r"""Retrieves the requested node pool. + + Args: + request (:class:`~.cluster_service.GetNodePoolRequest`): + The request object. GetNodePoolRequest retrieves a node + pool for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. 
+ This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Deprecated. The name of the node + pool. This field has been deprecated and + replaced by the name field. + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster, node pool id) of + the node pool to get. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.NodePool: + NodePool contains the name and + configuration for a cluster's node pool. + Node pools are a set of nodes (i.e. + VM's), with a common configuration and + specification, under the control of the + cluster master. They may have a set of + Kubernetes labels applied to them, which + may be used to reference them during pod + scheduling. They may also be resized up + or down, to accommodate the workload. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, node_pool_id, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.GetNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_node_pool( + self, + request: cluster_service.CreateNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool: cluster_service.NodePool = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a node pool for a cluster. + + Args: + request (:class:`~.cluster_service.CreateNodePoolRequest`): + The request object. 
CreateNodePoolRequest creates a node + pool for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the parent field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool (:class:`~.cluster_service.NodePool`): + Required. The node pool to create. + This corresponds to the ``node_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (:class:`str`): + The parent (project, location, cluster id) where the + node pool will be created. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, node_pool, parent] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CreateNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool is not None: + request.node_pool = node_pool + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_node_pool( + self, + request: cluster_service.DeleteNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes a node pool from a cluster. + + Args: + request (:class:`~.cluster_service.DeleteNodePoolRequest`): + The request object. DeleteNodePoolRequest deletes a node + pool for a cluster. + project_id (:class:`str`): + Deprecated. 
The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Deprecated. The name of the node pool + to delete. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster, node pool id) of + the node pool to delete. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, node_pool_id, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.DeleteNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def rollback_node_pool_upgrade( + self, + request: cluster_service.RollbackNodePoolUpgradeRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Rolls back a previously Aborted or Failed NodePool + upgrade. This makes no changes if the last upgrade + successfully completed. 
+ + Args: + request (:class:`~.cluster_service.RollbackNodePoolUpgradeRequest`): + The request object. RollbackNodePoolUpgradeRequest + rollbacks the previously Aborted or Failed NodePool + upgrade. This will be an no-op if the last upgrade + successfully completed. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to rollback. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Deprecated. The name of the node pool + to rollback. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster, node pool id) of + the node poll to rollback upgrade. Specified in the + format + ``projects/*/locations/*/clusters/*/nodePools/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, node_pool_id, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.RollbackNodePoolUpgradeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.rollback_node_pool_upgrade, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def set_node_pool_management( + self, + request: cluster_service.SetNodePoolManagementRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the NodeManagement options for a node pool. + + Args: + request (:class:`~.cluster_service.SetNodePoolManagementRequest`): + The request object. SetNodePoolManagementRequest sets + the node management properties of a node pool. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.SetNodePoolManagementRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_node_pool_management, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_labels( + self, + request: cluster_service.SetLabelsRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets labels on a cluster. 
+ + Args: + request (:class:`~.cluster_service.SetLabelsRequest`): + The request object. SetLabelsRequest sets the Google + Cloud Platform labels on a Google Container Engine + cluster, which will in turn set them for Google Compute + Engine resources used by that cluster + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.SetLabelsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_labels, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_legacy_abac( + self, + request: cluster_service.SetLegacyAbacRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + enabled: bool = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables the ABAC authorization mechanism + on a cluster. + + Args: + request (:class:`~.cluster_service.SetLegacyAbacRequest`): + The request object. 
SetLegacyAbacRequest enables or + disables the ABAC authorization mechanism for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to update. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + enabled (:class:`bool`): + Required. Whether ABAC authorization + will be enabled in the cluster. + This corresponds to the ``enabled`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to set legacy abac. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, enabled, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetLegacyAbacRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if enabled is not None: + request.enabled = enabled + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_legacy_abac, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def start_ip_rotation( + self, + request: cluster_service.StartIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Starts master IP rotation. + + Args: + request (:class:`~.cluster_service.StartIPRotationRequest`): + The request object. 
StartIPRotationRequest creates a new + IP for the cluster and then performs a node upgrade on + each node pool to point to the new IP. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to start IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([project_id, zone, cluster_id, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.StartIPRotationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.start_ip_rotation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def complete_ip_rotation( + self, + request: cluster_service.CompleteIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Completes master IP rotation. + + Args: + request (:class:`~.cluster_service.CompleteIPRotationRequest`): + The request object. CompleteIPRotationRequest moves the + cluster master back into single-IP mode. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. 
+ This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to complete IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CompleteIPRotationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.complete_ip_rotation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_node_pool_size( + self, + request: cluster_service.SetNodePoolSizeRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the size for a specific node pool. + + Args: + request (:class:`~.cluster_service.SetNodePoolSizeRequest`): + The request object. SetNodePoolSizeRequest sets the size + a node pool. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.SetNodePoolSizeRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_node_pool_size, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_network_policy( + self, + request: cluster_service.SetNetworkPolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + network_policy: cluster_service.NetworkPolicy = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables Network Policy for a cluster. + + Args: + request (:class:`~.cluster_service.SetNetworkPolicyRequest`): + The request object. SetNetworkPolicyRequest + enables/disables network policy for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. 
+ This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_policy (:class:`~.cluster_service.NetworkPolicy`): + Required. Configuration options for + the NetworkPolicy feature. + This corresponds to the ``network_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to set networking policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, network_policy, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetNetworkPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if network_policy is not None: + request.network_policy = network_policy + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_network_policy, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_maintenance_policy( + self, + request: cluster_service.SetMaintenancePolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + maintenance_policy: cluster_service.MaintenancePolicy = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the maintenance policy for a cluster. + + Args: + request (:class:`~.cluster_service.SetMaintenancePolicyRequest`): + The request object. SetMaintenancePolicyRequest sets the + maintenance policy for a cluster. + project_id (:class:`str`): + Required. The Google Developers Console `project ID or + project + number `__. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. The name of the cluster to + update. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + maintenance_policy (:class:`~.cluster_service.MaintenancePolicy`): + Required. The maintenance policy to + be set for the cluster. An empty field + clears the existing maintenance policy. + This corresponds to the ``maintenance_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to set maintenance policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, maintenance_policy, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetMaintenancePolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if maintenance_policy is not None: + request.maintenance_policy = maintenance_policy + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_maintenance_policy, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_usable_subnetworks( + self, + request: cluster_service.ListUsableSubnetworksRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListUsableSubnetworksAsyncPager: + r"""Lists subnetworks that are usable for creating + clusters in a project. + + Args: + request (:class:`~.cluster_service.ListUsableSubnetworksRequest`): + The request object. ListUsableSubnetworksRequest + requests the list of usable subnetworks available to a + user for creating clusters. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListUsableSubnetworksAsyncPager: + ListUsableSubnetworksResponse is the + response of + ListUsableSubnetworksRequest. + + Iterating over this object will yield + results and resolve additional pages + automatically. 
+ + """ + # Create or coerce a protobuf request object. + + request = cluster_service.ListUsableSubnetworksRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_usable_subnetworks, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListUsableSubnetworksAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-container",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("ClusterManagerAsyncClient",) diff --git a/google/cloud/container_v1/services/cluster_manager/client.py b/google/cloud/container_v1/services/cluster_manager/client.py new file mode 100644 index 00000000..42e89bfb --- /dev/null +++ b/google/cloud/container_v1/services/cluster_manager/client.py @@ -0,0 +1,3327 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import os +import re +from typing import Callable, Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.container_v1.services.cluster_manager import pagers +from google.cloud.container_v1.types import cluster_service + +from .transports.base import ClusterManagerTransport +from .transports.grpc import ClusterManagerGrpcTransport +from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport + + +class ClusterManagerClientMeta(type): + """Metaclass for the ClusterManager client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[ClusterManagerTransport]] + _transport_registry["grpc"] = ClusterManagerGrpcTransport + _transport_registry["grpc_asyncio"] = ClusterManagerGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[ClusterManagerTransport]: + """Return an appropriate transport class. 
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class ClusterManagerClient(metaclass=ClusterManagerClientMeta):
+    """Google Kubernetes Engine Cluster Manager v1"""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "container.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            {@api.name}: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, ClusterManagerTransport] = None, + client_options: ClientOptions = None, + ) -> None: + """Instantiate the cluster manager client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ClusterManagerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + if isinstance(client_options, dict): + client_options = ClientOptions.from_dict(client_options) + if client_options is None: + client_options = ClientOptions.ClientOptions() + + if client_options.api_endpoint is None: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + if use_mtls_env == "never": + client_options.api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + has_client_cert_source = ( + client_options.client_cert_source is not None + or mtls.has_default_client_cert_source() + ) + client_options.api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT + if has_client_cert_source + else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ClusterManagerTransport): + # transport is a ClusterManagerTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=client_options.api_endpoint, + scopes=client_options.scopes, + api_mtls_endpoint=client_options.api_endpoint, + client_cert_source=client_options.client_cert_source, + ) + + def list_clusters( + self, + request: cluster_service.ListClustersRequest = None, + *, + project_id: str = None, + zone: str = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListClustersResponse: + r"""Lists all clusters owned by a project in either the + specified zone or all zones. + + Args: + request (:class:`~.cluster_service.ListClustersRequest`): + The request object. ListClustersRequest lists clusters. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides, or "-" for all zones. This + field has been deprecated and replaced by the parent + field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (:class:`str`): + The parent (project and location) where the clusters + will be listed. Specified in the format + ``projects/*/locations/*``. Location "-" matches all + zones and all regions. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ListClustersResponse: + ListClustersResponse is the result of + ListClustersRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_clusters, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_cluster( + self, + request: cluster_service.GetClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Cluster: + r"""Gets the details of a specific cluster. + + Args: + request (:class:`~.cluster_service.GetClusterRequest`): + The request object. GetClusterRequest gets the settings + of a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to retrieve. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + retrieve. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.cluster_service.Cluster: + A Google Kubernetes Engine cluster. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_cluster, default_timeout=None, client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_cluster( + self, + request: cluster_service.CreateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster: cluster_service.Cluster = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. 
After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. + + Args: + request (:class:`~.cluster_service.CreateClusterRequest`): + The request object. CreateClusterRequest creates a + cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`~.cluster_service.Cluster`): + Required. A `cluster + resource `__ + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (:class:`str`): + The parent (project and location) where the cluster will + be created. Specified in the format + ``projects/*/locations/*``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster, parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster is not None: + request.cluster = cluster + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.create_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_cluster( + self, + request: cluster_service.UpdateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + update: cluster_service.ClusterUpdate = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the settings of a specific cluster. + + Args: + request (:class:`~.cluster_service.UpdateClusterRequest`): + The request object. UpdateClusterRequest updates the + settings of a cluster. + project_id (:class:`str`): + Deprecated. 
The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update (:class:`~.cluster_service.ClusterUpdate`): + Required. A description of the + update. + This corresponds to the ``update`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + update. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([project_id, zone, cluster_id, update, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if update is not None: + request.update = update + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.update_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_node_pool( + self, + request: cluster_service.UpdateNodePoolRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the version and/or image type for the + specified node pool. + + Args: + request (:class:`~.cluster_service.UpdateNodePoolRequest`): + The request object. UpdateNodePoolRequests update a node + pool's image and/or version. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.UpdateNodePoolRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.update_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_node_pool_autoscaling( + self, + request: cluster_service.SetNodePoolAutoscalingRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the autoscaling settings for the specified node + pool. + + Args: + request (:class:`~.cluster_service.SetNodePoolAutoscalingRequest`): + The request object. SetNodePoolAutoscalingRequest sets + the autoscaler settings of a node pool. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. 
+ + request = cluster_service.SetNodePoolAutoscalingRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_node_pool_autoscaling, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_logging_service( + self, + request: cluster_service.SetLoggingServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + logging_service: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the logging service for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetLoggingServiceRequest`): + The request object. SetLoggingServiceRequest sets the + logging service of a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. 
This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logging_service (:class:`str`): + Required. The logging service the cluster should use to + write metrics. Currently available options: + + - "logging.googleapis.com" - the Google Cloud Logging + service + - "none" - no metrics will be exported from the cluster + This corresponds to the ``logging_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + set logging. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, logging_service, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetLoggingServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if logging_service is not None: + request.logging_service = logging_service + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_logging_service, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_monitoring_service( + self, + request: cluster_service.SetMonitoringServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + monitoring_service: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the monitoring service for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetMonitoringServiceRequest`): + The request object. SetMonitoringServiceRequest sets the + monitoring service of a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + monitoring_service (:class:`str`): + Required. The monitoring service the cluster should use + to write metrics. Currently available options: + + - "monitoring.googleapis.com/kubernetes" - the Google + Cloud Monitoring service with Kubernetes-native + resource model + - "monitoring.googleapis.com" - the Google Cloud + Monitoring service + - "none" - no metrics will be exported from the cluster + This corresponds to the ``monitoring_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + set monitoring. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any( + [project_id, zone, cluster_id, monitoring_service, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetMonitoringServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if monitoring_service is not None: + request.monitoring_service = monitoring_service + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_monitoring_service, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_addons_config( + self, + request: cluster_service.SetAddonsConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + addons_config: cluster_service.AddonsConfig = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the addons for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetAddonsConfigRequest`): + The request object. SetAddonsConfigRequest sets the + addons associated with the cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. 
+ This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + addons_config (:class:`~.cluster_service.AddonsConfig`): + Required. The desired configurations + for the various addons available to run + in the cluster. + This corresponds to the ``addons_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + set addons. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any( + [project_id, zone, cluster_id, addons_config, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetAddonsConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if addons_config is not None: + request.addons_config = addons_config + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_addons_config, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_locations( + self, + request: cluster_service.SetLocationsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + locations: Sequence[str] = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the locations for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetLocationsRequest`): + The request object. SetLocationsRequest sets the + locations of the cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. 
+ This corresponds to the ``project_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ zone (:class:`str`):
+ Deprecated. The name of the Google Compute Engine
+ `zone <https://cloud.google.com/compute/docs/zones#available>`__
+ in which the cluster resides. This field has been
+ deprecated and replaced by the name field.
+ This corresponds to the ``zone`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ cluster_id (:class:`str`):
+ Deprecated. The name of the cluster
+ to upgrade. This field has been
+ deprecated and replaced by the name
+ field.
+ This corresponds to the ``cluster_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ locations (:class:`Sequence[str]`):
+ Required. The desired list of Google Compute Engine
+ `zones <https://cloud.google.com/compute/docs/zones#available>`__
+ in which the cluster's nodes should be located. Changing
+ the locations a cluster is in will result in nodes being
+ either created or removed from the cluster, depending on
+ whether locations are being added or removed.
+
+ This list must always include the cluster's primary
+ zone.
+ This corresponds to the ``locations`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ name (:class:`str`):
+ The name (project, location, cluster) of the cluster to
+ set locations. Specified in the format
+ ``projects/*/locations/*/clusters/*``.
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.cluster_service.Operation:
+ This operation resource represents
+ operations that may have happened or are
+ happening on the cluster. All fields are
+ output only.
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, locations, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetLocationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if locations is not None: + request.locations = locations + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_locations, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_master( + self, + request: cluster_service.UpdateMasterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + master_version: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the master for a specific cluster. + + Args: + request (:class:`~.cluster_service.UpdateMasterRequest`): + The request object. UpdateMasterRequest updates the + master of the cluster. + project_id (:class:`str`): + Deprecated. 
The Google Developers Console `project ID or
+ project
+ number <https://support.google.com/cloud/answer/6158840>`__.
+ This field has been deprecated and replaced by the name
+ field.
+ This corresponds to the ``project_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ zone (:class:`str`):
+ Deprecated. The name of the Google Compute Engine
+ `zone <https://cloud.google.com/compute/docs/zones#available>`__
+ in which the cluster resides. This field has been
+ deprecated and replaced by the name field.
+ This corresponds to the ``zone`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ cluster_id (:class:`str`):
+ Deprecated. The name of the cluster
+ to upgrade. This field has been
+ deprecated and replaced by the name
+ field.
+ This corresponds to the ``cluster_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ master_version (:class:`str`):
+ Required. The Kubernetes version to
+ change the master to.
+ Users may specify either explicit
+ versions offered by Kubernetes Engine or
+ version aliases, which have the
+ following behavior:
+ - "latest": picks the highest valid
+ Kubernetes version - "1.X": picks the
+ highest valid patch+gke.N patch in the
+ 1.X version - "1.X.Y": picks the highest
+ valid gke.N patch in the 1.X.Y version -
+ "1.X.Y-gke.N": picks an explicit
+ Kubernetes version - "-": picks the
+ default Kubernetes version
+ This corresponds to the ``master_version`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ name (:class:`str`):
+ The name (project, location, cluster) of the cluster to
+ update. Specified in the format
+ ``projects/*/locations/*/clusters/*``.
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, master_version, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.UpdateMasterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if master_version is not None: + request.master_version = master_version + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.update_master, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_master_auth( + self, + request: cluster_service.SetMasterAuthRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets master auth materials. 
Currently supports + changing the admin password or a specific cluster, + either via password generation or explicitly setting the + password. + + Args: + request (:class:`~.cluster_service.SetMasterAuthRequest`): + The request object. SetMasterAuthRequest updates the + admin password of a cluster. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.SetMasterAuthRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_master_auth, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_cluster( + self, + request: cluster_service.DeleteClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes the cluster, including the Kubernetes + endpoint and all worker nodes. + + Firewalls and routes that were configured during cluster + creation are also deleted. 
+ + Other Google Compute Engine resources that might be in + use by the cluster, such as load balancer resources, are + not deleted if they weren't present when the cluster was + initially created. + + Args: + request (:class:`~.cluster_service.DeleteClusterRequest`): + The request object. DeleteClusterRequest deletes a + cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to delete. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + delete. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_operations( + self, + request: cluster_service.ListOperationsRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListOperationsResponse: + r"""Lists all operations in a project in a specific zone + or all zones. + + Args: + request (:class:`~.cluster_service.ListOperationsRequest`): + The request object. ListOperationsRequest lists + operations. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. 
+ This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for, or ``-`` for all zones. This + field has been deprecated and replaced by the parent + field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ListOperationsResponse: + ListOperationsResponse is the result + of ListOperationsRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListOperationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: cluster_service.GetOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Gets the specified operation. + + Args: + request (:class:`~.cluster_service.GetOperationRequest`): + The request object. GetOperationRequest gets a single + operation. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (:class:`str`): + Deprecated. The server-assigned ``name`` of the + operation. This field has been deprecated and replaced + by the name field. + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, operation_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.GetOperationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def cancel_operation( + self, + request: cluster_service.CancelOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels the specified operation. + + Args: + request (:class:`~.cluster_service.CancelOperationRequest`): + The request object. CancelOperationRequest cancels a + single operation. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. 
+ This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the operation resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (:class:`str`): + Deprecated. The server-assigned ``name`` of the + operation. This field has been deprecated and replaced + by the name field. + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, operation id) of the + operation to cancel. Specified in the format + ``projects/*/locations/*/operations/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, operation_id, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CancelOperationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def get_server_config( + self, + request: cluster_service.GetServerConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ServerConfig: + r"""Returns configuration info about the Google + Kubernetes Engine service. + + Args: + request (:class:`~.cluster_service.GetServerConfigRequest`): + The request object. Gets the current Kubernetes Engine + service configuration. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for. This field has been deprecated + and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ name (:class:`str`): + The name (project and location) of the server config to + get, specified in the format ``projects/*/locations/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ServerConfig: + Kubernetes Engine service + configuration. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.GetServerConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_server_config, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def list_node_pools( + self, + request: cluster_service.ListNodePoolsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListNodePoolsResponse: + r"""Lists the node pools for a cluster. + + Args: + request (:class:`~.cluster_service.ListNodePoolsRequest`): + The request object. ListNodePoolsRequest lists the node + pool(s) for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the parent field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (:class:`str`): + The parent (project, location, cluster id) where the + node pools will be listed. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ListNodePoolsResponse: + ListNodePoolsResponse is the result + of ListNodePoolsRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListNodePoolsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_node_pools, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_node_pool( + self, + request: cluster_service.GetNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.NodePool: + r"""Retrieves the requested node pool. 
+ + Args: + request (:class:`~.cluster_service.GetNodePoolRequest`): + The request object. GetNodePoolRequest retrieves a node + pool for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Deprecated. The name of the node + pool. This field has been deprecated and + replaced by the name field. + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster, node pool id) of + the node pool to get. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.NodePool: + NodePool contains the name and + configuration for a cluster's node pool. 
+ Node pools are a set of nodes (i.e. + VM's), with a common configuration and + specification, under the control of the + cluster master. They may have a set of + Kubernetes labels applied to them, which + may be used to reference them during pod + scheduling. They may also be resized up + or down, to accommodate the workload. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, node_pool_id, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.GetNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def create_node_pool( + self, + request: cluster_service.CreateNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool: cluster_service.NodePool = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a node pool for a cluster. + + Args: + request (:class:`~.cluster_service.CreateNodePoolRequest`): + The request object. CreateNodePoolRequest creates a node + pool for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the parent field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool (:class:`~.cluster_service.NodePool`): + Required. The node pool to create. + This corresponds to the ``node_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (:class:`str`): + The parent (project, location, cluster id) where the + node pool will be created. Specified in the format + ``projects/*/locations/*/clusters/*``. 
+ This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, node_pool, parent] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CreateNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool is not None: + request.node_pool = node_pool + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.create_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def delete_node_pool( + self, + request: cluster_service.DeleteNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes a node pool from a cluster. + + Args: + request (:class:`~.cluster_service.DeleteNodePoolRequest`): + The request object. DeleteNodePoolRequest deletes a node + pool for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Deprecated. The name of the node pool + to delete. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster, node pool id) of + the node pool to delete. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. 
+ This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, node_pool_id, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.DeleteNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response
+
+ def rollback_node_pool_upgrade(
+ self,
+ request: cluster_service.RollbackNodePoolUpgradeRequest = None,
+ *,
+ project_id: str = None,
+ zone: str = None,
+ cluster_id: str = None,
+ node_pool_id: str = None,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> cluster_service.Operation:
+ r"""Rolls back a previously Aborted or Failed NodePool
+ upgrade. This makes no changes if the last upgrade
+ successfully completed.
+
+ Args:
+ request (:class:`~.cluster_service.RollbackNodePoolUpgradeRequest`):
+ The request object. RollbackNodePoolUpgradeRequest
+ rolls back the previously Aborted or Failed NodePool
+ upgrade. This will be a no-op if the last upgrade
+ successfully completed.
+ project_id (:class:`str`):
+ Deprecated. The Google Developers Console `project ID or
+ project
+ number `__.
+ This field has been deprecated and replaced by the name
+ field.
+ This corresponds to the ``project_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ zone (:class:`str`):
+ Deprecated. The name of the Google Compute Engine
+ `zone `__
+ in which the cluster resides. This field has been
+ deprecated and replaced by the name field.
+ This corresponds to the ``zone`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ cluster_id (:class:`str`):
+ Deprecated. The name of the cluster
+ to rollback. This field has been
+ deprecated and replaced by the name
+ field.
+ This corresponds to the ``cluster_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ node_pool_id (:class:`str`):
+ Deprecated. The name of the node pool
+ to rollback. This field has been
+ deprecated and replaced by the name
+ field.
+ This corresponds to the ``node_pool_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ name (:class:`str`):
+ The name (project, location, cluster, node pool id) of
+ the node pool to rollback upgrade. Specified in the
+ format
+ ``projects/*/locations/*/clusters/*/nodePools/*``.
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.cluster_service.Operation:
+ This operation resource represents
+ operations that may have happened or are
+ happening on the cluster. All fields are
+ output only.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ if request is not None and any(
+ [project_id, zone, cluster_id, node_pool_id, name]
+ ):
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = cluster_service.RollbackNodePoolUpgradeRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if project_id is not None:
+ request.project_id = project_id
+ if zone is not None:
+ request.zone = zone
+ if cluster_id is not None:
+ request.cluster_id = cluster_id
+ if node_pool_id is not None:
+ request.node_pool_id = node_pool_id
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method.wrap_method(
+ self._transport.rollback_node_pool_upgrade,
+ default_timeout=None,
+ client_info=_client_info,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_node_pool_management( + self, + request: cluster_service.SetNodePoolManagementRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the NodeManagement options for a node pool. + + Args: + request (:class:`~.cluster_service.SetNodePoolManagementRequest`): + The request object. SetNodePoolManagementRequest sets + the node management properties of a node pool. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.SetNodePoolManagementRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_node_pool_management, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def set_labels( + self, + request: cluster_service.SetLabelsRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets labels on a cluster. + + Args: + request (:class:`~.cluster_service.SetLabelsRequest`): + The request object. SetLabelsRequest sets the Google + Cloud Platform labels on a Google Container Engine + cluster, which will in turn set them for Google Compute + Engine resources used by that cluster + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.SetLabelsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_labels, default_timeout=None, client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def set_legacy_abac( + self, + request: cluster_service.SetLegacyAbacRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + enabled: bool = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables the ABAC authorization mechanism + on a cluster. + + Args: + request (:class:`~.cluster_service.SetLegacyAbacRequest`): + The request object. SetLegacyAbacRequest enables or + disables the ABAC authorization mechanism for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to update. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + enabled (:class:`bool`): + Required. Whether ABAC authorization + will be enabled in the cluster. + This corresponds to the ``enabled`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to set legacy abac. Specified in the format + ``projects/*/locations/*/clusters/*``. 
+ This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, enabled, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetLegacyAbacRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if enabled is not None: + request.enabled = enabled + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_legacy_abac, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def start_ip_rotation( + self, + request: cluster_service.StartIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Starts master IP rotation. + + Args: + request (:class:`~.cluster_service.StartIPRotationRequest`): + The request object. StartIPRotationRequest creates a new + IP for the cluster and then performs a node upgrade on + each node pool to point to the new IP. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to start IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.StartIPRotationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.start_ip_rotation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def complete_ip_rotation( + self, + request: cluster_service.CompleteIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Completes master IP rotation. + + Args: + request (:class:`~.cluster_service.CompleteIPRotationRequest`): + The request object. CompleteIPRotationRequest moves the + cluster master back into single-IP mode. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to complete IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CompleteIPRotationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.complete_ip_rotation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_node_pool_size( + self, + request: cluster_service.SetNodePoolSizeRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the size for a specific node pool. 
+
+ Args:
+ request (:class:`~.cluster_service.SetNodePoolSizeRequest`):
+ The request object. SetNodePoolSizeRequest sets the size
+ of a node pool.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.cluster_service.Operation:
+ This operation resource represents
+ operations that may have happened or are
+ happening on the cluster. All fields are
+ output only.
+
+ """
+ # Create or coerce a protobuf request object.
+
+ request = cluster_service.SetNodePoolSizeRequest(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method.wrap_method(
+ self._transport.set_node_pool_size,
+ default_timeout=None,
+ client_info=_client_info,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def set_network_policy(
+ self,
+ request: cluster_service.SetNetworkPolicyRequest = None,
+ *,
+ project_id: str = None,
+ zone: str = None,
+ cluster_id: str = None,
+ network_policy: cluster_service.NetworkPolicy = None,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> cluster_service.Operation:
+ r"""Enables or disables Network Policy for a cluster.
+
+ Args:
+ request (:class:`~.cluster_service.SetNetworkPolicyRequest`):
+ The request object. SetNetworkPolicyRequest
+ enables/disables network policy for a cluster.
+ project_id (:class:`str`):
+ Deprecated.
The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_policy (:class:`~.cluster_service.NetworkPolicy`): + Required. Configuration options for + the NetworkPolicy feature. + This corresponds to the ``network_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to set networking policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, network_policy, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetNetworkPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if network_policy is not None: + request.network_policy = network_policy + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_network_policy, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_maintenance_policy( + self, + request: cluster_service.SetMaintenancePolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + maintenance_policy: cluster_service.MaintenancePolicy = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the maintenance policy for a cluster. + + Args: + request (:class:`~.cluster_service.SetMaintenancePolicyRequest`): + The request object. 
SetMaintenancePolicyRequest sets the + maintenance policy for a cluster. + project_id (:class:`str`): + Required. The Google Developers Console `project ID or + project + number `__. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. The name of the cluster to + update. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + maintenance_policy (:class:`~.cluster_service.MaintenancePolicy`): + Required. The maintenance policy to + be set for the cluster. An empty field + clears the existing maintenance policy. + This corresponds to the ``maintenance_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to set maintenance policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, maintenance_policy, name] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetMaintenancePolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if maintenance_policy is not None: + request.maintenance_policy = maintenance_policy + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_maintenance_policy, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_usable_subnetworks( + self, + request: cluster_service.ListUsableSubnetworksRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListUsableSubnetworksPager: + r"""Lists subnetworks that are usable for creating + clusters in a project. + + Args: + request (:class:`~.cluster_service.ListUsableSubnetworksRequest`): + The request object. ListUsableSubnetworksRequest + requests the list of usable subnetworks available to a + user for creating clusters. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListUsableSubnetworksPager: + ListUsableSubnetworksResponse is the + response of + ListUsableSubnetworksRequest. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.ListUsableSubnetworksRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_usable_subnetworks, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListUsableSubnetworksPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+        return response
+
+
+try:
+    _client_info = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution("google-cloud-container",).version,
+    )
+except pkg_resources.DistributionNotFound:
+    _client_info = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("ClusterManagerClient",)
diff --git a/google/cloud/container_v1/services/cluster_manager/pagers.py b/google/cloud/container_v1/services/cluster_manager/pagers.py
new file mode 100644
index 00000000..8491351b
--- /dev/null
+++ b/google/cloud/container_v1/services/cluster_manager/pagers.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
+
+from google.cloud.container_v1.types import cluster_service
+
+
+class ListUsableSubnetworksPager:
+    """A pager for iterating through ``list_usable_subnetworks`` requests.
+
+    This class thinly wraps an initial
+    :class:`~.cluster_service.ListUsableSubnetworksResponse` object, and
+    provides an ``__iter__`` method to iterate through its
+    ``subnetworks`` field.
+
+    If there are more pages, the ``__iter__`` method will make additional
+    ``ListUsableSubnetworks`` requests and continue to iterate
+    through the ``subnetworks`` field on the
+    corresponding responses.
+
+    All the usual :class:`~.cluster_service.ListUsableSubnetworksResponse`
+    attributes are available on the pager.
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., cluster_service.ListUsableSubnetworksResponse], + request: cluster_service.ListUsableSubnetworksRequest, + response: cluster_service.ListUsableSubnetworksResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.cluster_service.ListUsableSubnetworksRequest`): + The initial request object. + response (:class:`~.cluster_service.ListUsableSubnetworksResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = cluster_service.ListUsableSubnetworksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[cluster_service.ListUsableSubnetworksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[cluster_service.UsableSubnetwork]: + for page in self.pages: + yield from page.subnetworks + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListUsableSubnetworksAsyncPager: + """A pager for iterating through ``list_usable_subnetworks`` requests. + + This class thinly wraps an initial + :class:`~.cluster_service.ListUsableSubnetworksResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``subnetworks`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListUsableSubnetworks`` requests and continue to iterate + through the ``subnetworks`` field on the + corresponding responses. + + All the usual :class:`~.cluster_service.ListUsableSubnetworksResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[cluster_service.ListUsableSubnetworksResponse]], + request: cluster_service.ListUsableSubnetworksRequest, + response: cluster_service.ListUsableSubnetworksResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.cluster_service.ListUsableSubnetworksRequest`): + The initial request object. + response (:class:`~.cluster_service.ListUsableSubnetworksResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = cluster_service.ListUsableSubnetworksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[cluster_service.ListUsableSubnetworksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[cluster_service.UsableSubnetwork]: + async def async_generator(): + async for page in self.pages: + for response in page.subnetworks: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/container_v1/services/cluster_manager/transports/__init__.py b/google/cloud/container_v1/services/cluster_manager/transports/__init__.py new file mode 100644 index 00000000..71d01ec8 --- /dev/null +++ b/google/cloud/container_v1/services/cluster_manager/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import ClusterManagerTransport +from .grpc import ClusterManagerGrpcTransport +from .grpc_asyncio import ClusterManagerGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterManagerTransport]] +_transport_registry["grpc"] = ClusterManagerGrpcTransport +_transport_registry["grpc_asyncio"] = ClusterManagerGrpcAsyncIOTransport + + +__all__ = ( + "ClusterManagerTransport", + "ClusterManagerGrpcTransport", + "ClusterManagerGrpcAsyncIOTransport", +) diff --git a/google/cloud/container_v1/services/cluster_manager/transports/base.py b/google/cloud/container_v1/services/cluster_manager/transports/base.py new file mode 100644 index 00000000..06635873 --- /dev/null +++ b/google/cloud/container_v1/services/cluster_manager/transports/base.py @@ -0,0 +1,423 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import typing + +from google import auth +from google.api_core import exceptions # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.container_v1.types import cluster_service +from google.protobuf import empty_pb2 as empty # type: ignore + + +class ClusterManagerTransport(abc.ABC): + """Abstract transport class for ClusterManager.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "container.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes + ) + elif credentials is None: + credentials, _ = auth.default(scopes=scopes) + + # Save the credentials. 
+ self._credentials = credentials + + @property + def list_clusters( + self, + ) -> typing.Callable[ + [cluster_service.ListClustersRequest], + typing.Union[ + cluster_service.ListClustersResponse, + typing.Awaitable[cluster_service.ListClustersResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_cluster( + self, + ) -> typing.Callable[ + [cluster_service.GetClusterRequest], + typing.Union[ + cluster_service.Cluster, typing.Awaitable[cluster_service.Cluster] + ], + ]: + raise NotImplementedError() + + @property + def create_cluster( + self, + ) -> typing.Callable[ + [cluster_service.CreateClusterRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def update_cluster( + self, + ) -> typing.Callable[ + [cluster_service.UpdateClusterRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def update_node_pool( + self, + ) -> typing.Callable[ + [cluster_service.UpdateNodePoolRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_node_pool_autoscaling( + self, + ) -> typing.Callable[ + [cluster_service.SetNodePoolAutoscalingRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_logging_service( + self, + ) -> typing.Callable[ + [cluster_service.SetLoggingServiceRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_monitoring_service( + self, + ) -> typing.Callable[ + [cluster_service.SetMonitoringServiceRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise 
NotImplementedError() + + @property + def set_addons_config( + self, + ) -> typing.Callable[ + [cluster_service.SetAddonsConfigRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_locations( + self, + ) -> typing.Callable[ + [cluster_service.SetLocationsRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def update_master( + self, + ) -> typing.Callable[ + [cluster_service.UpdateMasterRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_master_auth( + self, + ) -> typing.Callable[ + [cluster_service.SetMasterAuthRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def delete_cluster( + self, + ) -> typing.Callable[ + [cluster_service.DeleteClusterRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> typing.Callable[ + [cluster_service.ListOperationsRequest], + typing.Union[ + cluster_service.ListOperationsResponse, + typing.Awaitable[cluster_service.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> typing.Callable[ + [cluster_service.GetOperationRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> typing.Callable[ + [cluster_service.CancelOperationRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def get_server_config( + self, + ) -> 
typing.Callable[ + [cluster_service.GetServerConfigRequest], + typing.Union[ + cluster_service.ServerConfig, typing.Awaitable[cluster_service.ServerConfig] + ], + ]: + raise NotImplementedError() + + @property + def list_node_pools( + self, + ) -> typing.Callable[ + [cluster_service.ListNodePoolsRequest], + typing.Union[ + cluster_service.ListNodePoolsResponse, + typing.Awaitable[cluster_service.ListNodePoolsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_node_pool( + self, + ) -> typing.Callable[ + [cluster_service.GetNodePoolRequest], + typing.Union[ + cluster_service.NodePool, typing.Awaitable[cluster_service.NodePool] + ], + ]: + raise NotImplementedError() + + @property + def create_node_pool( + self, + ) -> typing.Callable[ + [cluster_service.CreateNodePoolRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def delete_node_pool( + self, + ) -> typing.Callable[ + [cluster_service.DeleteNodePoolRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def rollback_node_pool_upgrade( + self, + ) -> typing.Callable[ + [cluster_service.RollbackNodePoolUpgradeRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_node_pool_management( + self, + ) -> typing.Callable[ + [cluster_service.SetNodePoolManagementRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_labels( + self, + ) -> typing.Callable[ + [cluster_service.SetLabelsRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_legacy_abac( + self, + ) -> typing.Callable[ 
+ [cluster_service.SetLegacyAbacRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def start_ip_rotation( + self, + ) -> typing.Callable[ + [cluster_service.StartIPRotationRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def complete_ip_rotation( + self, + ) -> typing.Callable[ + [cluster_service.CompleteIPRotationRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_node_pool_size( + self, + ) -> typing.Callable[ + [cluster_service.SetNodePoolSizeRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_network_policy( + self, + ) -> typing.Callable[ + [cluster_service.SetNetworkPolicyRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_maintenance_policy( + self, + ) -> typing.Callable[ + [cluster_service.SetMaintenancePolicyRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def list_usable_subnetworks( + self, + ) -> typing.Callable[ + [cluster_service.ListUsableSubnetworksRequest], + typing.Union[ + cluster_service.ListUsableSubnetworksResponse, + typing.Awaitable[cluster_service.ListUsableSubnetworksResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("ClusterManagerTransport",) diff --git a/google/cloud/container_v1/services/cluster_manager/transports/grpc.py b/google/cloud/container_v1/services/cluster_manager/transports/grpc.py new file mode 100644 index 00000000..a9eb88e5 --- /dev/null +++ 
b/google/cloud/container_v1/services/cluster_manager/transports/grpc.py @@ -0,0 +1,1061 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + + +import grpc # type: ignore + +from google.cloud.container_v1.types import cluster_service +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import ClusterManagerTransport + + +class ClusterManagerGrpcTransport(ClusterManagerTransport): + """gRPC backend transport for ClusterManager. + + Google Kubernetes Engine Cluster Manager v1 + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+    """
+
+    _stubs: Dict[str, Callable]
+
+    def __init__(
+        self,
+        *,
+        host: str = "container.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Sequence[str] = None,
+        channel: grpc.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If
+                provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A
+                callback to provide client SSL certificate bytes and private key
+                bytes, both in PEM format. It is ignored if ``api_mtls_endpoint``
+                is None.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+        elif api_mtls_endpoint:
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+            )
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+        )
+
+        self._stubs = {}  # type: Dict[str, Callable]
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "container.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        **kwargs
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service.
These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def list_clusters( + self, + ) -> Callable[ + [cluster_service.ListClustersRequest], cluster_service.ListClustersResponse + ]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all clusters owned by a project in either the + specified zone or all zones. + + Returns: + Callable[[~.ListClustersRequest], + ~.ListClustersResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_clusters" not in self._stubs:
+            self._stubs["list_clusters"] = self.grpc_channel.unary_unary(
+                "/google.container.v1.ClusterManager/ListClusters",
+                request_serializer=cluster_service.ListClustersRequest.serialize,
+                response_deserializer=cluster_service.ListClustersResponse.deserialize,
+            )
+        return self._stubs["list_clusters"]
+
+    @property
+    def get_cluster(
+        self,
+    ) -> Callable[[cluster_service.GetClusterRequest], cluster_service.Cluster]:
+        r"""Return a callable for the get cluster method over gRPC.
+
+        Gets the details of a specific cluster.
+
+        Returns:
+            Callable[[~.GetClusterRequest],
+                    ~.Cluster]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_cluster" not in self._stubs:
+            self._stubs["get_cluster"] = self.grpc_channel.unary_unary(
+                "/google.container.v1.ClusterManager/GetCluster",
+                request_serializer=cluster_service.GetClusterRequest.serialize,
+                response_deserializer=cluster_service.Cluster.deserialize,
+            )
+        return self._stubs["get_cluster"]
+
+    @property
+    def create_cluster(
+        self,
+    ) -> Callable[[cluster_service.CreateClusterRequest], cluster_service.Operation]:
+        r"""Return a callable for the create cluster method over gRPC.
+
+        Creates a cluster, consisting of the specified number and type
+        of Google Compute Engine instances.
+
+        By default, the cluster is created in the project's `default
+        network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
+
+        One firewall is added for the cluster. After cluster creation,
+        the Kubelet creates routes for each node to allow the containers
+        on that node to communicate with all other instances in the
+        cluster.
+
+        Finally, an entry is added to the project's global metadata
+        indicating which CIDR range the cluster is using.
+ + Returns: + Callable[[~.CreateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/CreateCluster", + request_serializer=cluster_service.CreateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["create_cluster"] + + @property + def update_cluster( + self, + ) -> Callable[[cluster_service.UpdateClusterRequest], cluster_service.Operation]: + r"""Return a callable for the update cluster method over gRPC. + + Updates the settings of a specific cluster. + + Returns: + Callable[[~.UpdateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/UpdateCluster", + request_serializer=cluster_service.UpdateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["update_cluster"] + + @property + def update_node_pool( + self, + ) -> Callable[[cluster_service.UpdateNodePoolRequest], cluster_service.Operation]: + r"""Return a callable for the update node pool method over gRPC. + + Updates the version and/or image type for the + specified node pool. 
+ + Returns: + Callable[[~.UpdateNodePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_node_pool" not in self._stubs: + self._stubs["update_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/UpdateNodePool", + request_serializer=cluster_service.UpdateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["update_node_pool"] + + @property + def set_node_pool_autoscaling( + self, + ) -> Callable[ + [cluster_service.SetNodePoolAutoscalingRequest], cluster_service.Operation + ]: + r"""Return a callable for the set node pool autoscaling method over gRPC. + + Sets the autoscaling settings for the specified node + pool. + + Returns: + Callable[[~.SetNodePoolAutoscalingRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_node_pool_autoscaling" not in self._stubs: + self._stubs["set_node_pool_autoscaling"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetNodePoolAutoscaling", + request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_node_pool_autoscaling"] + + @property + def set_logging_service( + self, + ) -> Callable[ + [cluster_service.SetLoggingServiceRequest], cluster_service.Operation + ]: + r"""Return a callable for the set logging service method over gRPC. + + Sets the logging service for a specific cluster. 
+ + Returns: + Callable[[~.SetLoggingServiceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_logging_service" not in self._stubs: + self._stubs["set_logging_service"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetLoggingService", + request_serializer=cluster_service.SetLoggingServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_logging_service"] + + @property + def set_monitoring_service( + self, + ) -> Callable[ + [cluster_service.SetMonitoringServiceRequest], cluster_service.Operation + ]: + r"""Return a callable for the set monitoring service method over gRPC. + + Sets the monitoring service for a specific cluster. + + Returns: + Callable[[~.SetMonitoringServiceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_monitoring_service" not in self._stubs: + self._stubs["set_monitoring_service"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetMonitoringService", + request_serializer=cluster_service.SetMonitoringServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_monitoring_service"] + + @property + def set_addons_config( + self, + ) -> Callable[[cluster_service.SetAddonsConfigRequest], cluster_service.Operation]: + r"""Return a callable for the set addons config method over gRPC. + + Sets the addons for a specific cluster. 
+ + Returns: + Callable[[~.SetAddonsConfigRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_addons_config" not in self._stubs: + self._stubs["set_addons_config"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetAddonsConfig", + request_serializer=cluster_service.SetAddonsConfigRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_addons_config"] + + @property + def set_locations( + self, + ) -> Callable[[cluster_service.SetLocationsRequest], cluster_service.Operation]: + r"""Return a callable for the set locations method over gRPC. + + Sets the locations for a specific cluster. + + Returns: + Callable[[~.SetLocationsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_locations" not in self._stubs: + self._stubs["set_locations"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetLocations", + request_serializer=cluster_service.SetLocationsRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_locations"] + + @property + def update_master( + self, + ) -> Callable[[cluster_service.UpdateMasterRequest], cluster_service.Operation]: + r"""Return a callable for the update master method over gRPC. + + Updates the master for a specific cluster. + + Returns: + Callable[[~.UpdateMasterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_master" not in self._stubs: + self._stubs["update_master"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/UpdateMaster", + request_serializer=cluster_service.UpdateMasterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["update_master"] + + @property + def set_master_auth( + self, + ) -> Callable[[cluster_service.SetMasterAuthRequest], cluster_service.Operation]: + r"""Return a callable for the set master auth method over gRPC. + + Sets master auth materials. Currently supports + changing the admin password or a specific cluster, + either via password generation or explicitly setting the + password. + + Returns: + Callable[[~.SetMasterAuthRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_master_auth" not in self._stubs: + self._stubs["set_master_auth"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetMasterAuth", + request_serializer=cluster_service.SetMasterAuthRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_master_auth"] + + @property + def delete_cluster( + self, + ) -> Callable[[cluster_service.DeleteClusterRequest], cluster_service.Operation]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes the cluster, including the Kubernetes + endpoint and all worker nodes. + + Firewalls and routes that were configured during cluster + creation are also deleted. 
+ + Other Google Compute Engine resources that might be in + use by the cluster, such as load balancer resources, are + not deleted if they weren't present when the cluster was + initially created. + + Returns: + Callable[[~.DeleteClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/DeleteCluster", + request_serializer=cluster_service.DeleteClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["delete_cluster"] + + @property + def list_operations( + self, + ) -> Callable[ + [cluster_service.ListOperationsRequest], cluster_service.ListOperationsResponse + ]: + r"""Return a callable for the list operations method over gRPC. + + Lists all operations in a project in a specific zone + or all zones. + + Returns: + Callable[[~.ListOperationsRequest], + ~.ListOperationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/ListOperations", + request_serializer=cluster_service.ListOperationsRequest.serialize, + response_deserializer=cluster_service.ListOperationsResponse.deserialize, + ) + return self._stubs["list_operations"] + + @property + def get_operation( + self, + ) -> Callable[[cluster_service.GetOperationRequest], cluster_service.Operation]: + r"""Return a callable for the get operation method over gRPC. + + Gets the specified operation. + + Returns: + Callable[[~.GetOperationRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/GetOperation", + request_serializer=cluster_service.GetOperationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["get_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[cluster_service.CancelOperationRequest], empty.Empty]: + r"""Return a callable for the cancel operation method over gRPC. + + Cancels the specified operation. + + Returns: + Callable[[~.CancelOperationRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/CancelOperation", + request_serializer=cluster_service.CancelOperationRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["cancel_operation"] + + @property + def get_server_config( + self, + ) -> Callable[ + [cluster_service.GetServerConfigRequest], cluster_service.ServerConfig + ]: + r"""Return a callable for the get server config method over gRPC. + + Returns configuration info about the Google + Kubernetes Engine service. + + Returns: + Callable[[~.GetServerConfigRequest], + ~.ServerConfig]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_server_config" not in self._stubs: + self._stubs["get_server_config"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/GetServerConfig", + request_serializer=cluster_service.GetServerConfigRequest.serialize, + response_deserializer=cluster_service.ServerConfig.deserialize, + ) + return self._stubs["get_server_config"] + + @property + def list_node_pools( + self, + ) -> Callable[ + [cluster_service.ListNodePoolsRequest], cluster_service.ListNodePoolsResponse + ]: + r"""Return a callable for the list node pools method over gRPC. + + Lists the node pools for a cluster. + + Returns: + Callable[[~.ListNodePoolsRequest], + ~.ListNodePoolsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_node_pools" not in self._stubs: + self._stubs["list_node_pools"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/ListNodePools", + request_serializer=cluster_service.ListNodePoolsRequest.serialize, + response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, + ) + return self._stubs["list_node_pools"] + + @property + def get_node_pool( + self, + ) -> Callable[[cluster_service.GetNodePoolRequest], cluster_service.NodePool]: + r"""Return a callable for the get node pool method over gRPC. + + Retrieves the requested node pool. + + Returns: + Callable[[~.GetNodePoolRequest], + ~.NodePool]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_node_pool" not in self._stubs: + self._stubs["get_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/GetNodePool", + request_serializer=cluster_service.GetNodePoolRequest.serialize, + response_deserializer=cluster_service.NodePool.deserialize, + ) + return self._stubs["get_node_pool"] + + @property + def create_node_pool( + self, + ) -> Callable[[cluster_service.CreateNodePoolRequest], cluster_service.Operation]: + r"""Return a callable for the create node pool method over gRPC. + + Creates a node pool for a cluster. + + Returns: + Callable[[~.CreateNodePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_node_pool" not in self._stubs: + self._stubs["create_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/CreateNodePool", + request_serializer=cluster_service.CreateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["create_node_pool"] + + @property + def delete_node_pool( + self, + ) -> Callable[[cluster_service.DeleteNodePoolRequest], cluster_service.Operation]: + r"""Return a callable for the delete node pool method over gRPC. + + Deletes a node pool from a cluster. + + Returns: + Callable[[~.DeleteNodePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_node_pool" not in self._stubs: + self._stubs["delete_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/DeleteNodePool", + request_serializer=cluster_service.DeleteNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["delete_node_pool"] + + @property + def rollback_node_pool_upgrade( + self, + ) -> Callable[ + [cluster_service.RollbackNodePoolUpgradeRequest], cluster_service.Operation + ]: + r"""Return a callable for the rollback node pool upgrade method over gRPC. + + Rolls back a previously Aborted or Failed NodePool + upgrade. This makes no changes if the last upgrade + successfully completed. + + Returns: + Callable[[~.RollbackNodePoolUpgradeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "rollback_node_pool_upgrade" not in self._stubs: + self._stubs["rollback_node_pool_upgrade"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/RollbackNodePoolUpgrade", + request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["rollback_node_pool_upgrade"] + + @property + def set_node_pool_management( + self, + ) -> Callable[ + [cluster_service.SetNodePoolManagementRequest], cluster_service.Operation + ]: + r"""Return a callable for the set node pool management method over gRPC. + + Sets the NodeManagement options for a node pool. + + Returns: + Callable[[~.SetNodePoolManagementRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_node_pool_management" not in self._stubs: + self._stubs["set_node_pool_management"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetNodePoolManagement", + request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_node_pool_management"] + + @property + def set_labels( + self, + ) -> Callable[[cluster_service.SetLabelsRequest], cluster_service.Operation]: + r"""Return a callable for the set labels method over gRPC. + + Sets labels on a cluster. + + Returns: + Callable[[~.SetLabelsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_labels" not in self._stubs: + self._stubs["set_labels"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetLabels", + request_serializer=cluster_service.SetLabelsRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_labels"] + + @property + def set_legacy_abac( + self, + ) -> Callable[[cluster_service.SetLegacyAbacRequest], cluster_service.Operation]: + r"""Return a callable for the set legacy abac method over gRPC. + + Enables or disables the ABAC authorization mechanism + on a cluster. + + Returns: + Callable[[~.SetLegacyAbacRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_legacy_abac" not in self._stubs: + self._stubs["set_legacy_abac"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetLegacyAbac", + request_serializer=cluster_service.SetLegacyAbacRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_legacy_abac"] + + @property + def start_ip_rotation( + self, + ) -> Callable[[cluster_service.StartIPRotationRequest], cluster_service.Operation]: + r"""Return a callable for the start ip rotation method over gRPC. + + Starts master IP rotation. + + Returns: + Callable[[~.StartIPRotationRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "start_ip_rotation" not in self._stubs: + self._stubs["start_ip_rotation"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/StartIPRotation", + request_serializer=cluster_service.StartIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["start_ip_rotation"] + + @property + def complete_ip_rotation( + self, + ) -> Callable[ + [cluster_service.CompleteIPRotationRequest], cluster_service.Operation + ]: + r"""Return a callable for the complete ip rotation method over gRPC. + + Completes master IP rotation. + + Returns: + Callable[[~.CompleteIPRotationRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "complete_ip_rotation" not in self._stubs: + self._stubs["complete_ip_rotation"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/CompleteIPRotation", + request_serializer=cluster_service.CompleteIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["complete_ip_rotation"] + + @property + def set_node_pool_size( + self, + ) -> Callable[[cluster_service.SetNodePoolSizeRequest], cluster_service.Operation]: + r"""Return a callable for the set node pool size method over gRPC. + + Sets the size for a specific node pool. + + Returns: + Callable[[~.SetNodePoolSizeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_node_pool_size" not in self._stubs: + self._stubs["set_node_pool_size"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetNodePoolSize", + request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_node_pool_size"] + + @property + def set_network_policy( + self, + ) -> Callable[[cluster_service.SetNetworkPolicyRequest], cluster_service.Operation]: + r"""Return a callable for the set network policy method over gRPC. + + Enables or disables Network Policy for a cluster. + + Returns: + Callable[[~.SetNetworkPolicyRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_network_policy" not in self._stubs: + self._stubs["set_network_policy"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetNetworkPolicy", + request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_network_policy"] + + @property + def set_maintenance_policy( + self, + ) -> Callable[ + [cluster_service.SetMaintenancePolicyRequest], cluster_service.Operation + ]: + r"""Return a callable for the set maintenance policy method over gRPC. + + Sets the maintenance policy for a cluster. + + Returns: + Callable[[~.SetMaintenancePolicyRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_maintenance_policy" not in self._stubs: + self._stubs["set_maintenance_policy"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetMaintenancePolicy", + request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_maintenance_policy"] + + @property + def list_usable_subnetworks( + self, + ) -> Callable[ + [cluster_service.ListUsableSubnetworksRequest], + cluster_service.ListUsableSubnetworksResponse, + ]: + r"""Return a callable for the list usable subnetworks method over gRPC. + + Lists subnetworks that are usable for creating + clusters in a project. + + Returns: + Callable[[~.ListUsableSubnetworksRequest], + ~.ListUsableSubnetworksResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_usable_subnetworks" not in self._stubs: + self._stubs["list_usable_subnetworks"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/ListUsableSubnetworks", + request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, + response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, + ) + return self._stubs["list_usable_subnetworks"] + + +__all__ = ("ClusterManagerGrpcTransport",) diff --git a/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py b/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py new file mode 100644 index 00000000..add48e49 --- /dev/null +++ b/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py @@ -0,0 +1,1102 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.container_v1.types import cluster_service
+from google.protobuf import empty_pb2 as empty  # type: ignore
+
+from .base import ClusterManagerTransport
+from .grpc import ClusterManagerGrpcTransport
+
+
+class ClusterManagerGrpcAsyncIOTransport(ClusterManagerTransport):
+    """gRPC AsyncIO backend transport for ClusterManager.
+
+    Google Kubernetes Engine Cluster Manager v1
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "container.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        **kwargs
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            **kwargs
+        )
+
+    def __init__(
+        self,
+        *,
+        host: str = "container.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: aio.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): The mutual TLS endpoint.
If
+                provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A
+                callback to provide client SSL certificate bytes and private key
+                bytes, both in PEM format. It is ignored if ``api_mtls_endpoint``
+                is None.
+
+        Raises:
+            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+        elif api_mtls_endpoint:
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+            )
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+        )
+
+        self._stubs = {}
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+ """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def list_clusters( + self, + ) -> Callable[ + [cluster_service.ListClustersRequest], + Awaitable[cluster_service.ListClustersResponse], + ]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all clusters owned by a project in either the + specified zone or all zones. + + Returns: + Callable[[~.ListClustersRequest], + Awaitable[~.ListClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_clusters" not in self._stubs: + self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/ListClusters", + request_serializer=cluster_service.ListClustersRequest.serialize, + response_deserializer=cluster_service.ListClustersResponse.deserialize, + ) + return self._stubs["list_clusters"] + + @property + def get_cluster( + self, + ) -> Callable[ + [cluster_service.GetClusterRequest], Awaitable[cluster_service.Cluster] + ]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the details of a specific cluster. + + Returns: + Callable[[~.GetClusterRequest], + Awaitable[~.Cluster]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_cluster" not in self._stubs: + self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/GetCluster", + request_serializer=cluster_service.GetClusterRequest.serialize, + response_deserializer=cluster_service.Cluster.deserialize, + ) + return self._stubs["get_cluster"] + + @property + def create_cluster( + self, + ) -> Callable[ + [cluster_service.CreateClusterRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. + + Returns: + Callable[[~.CreateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/CreateCluster", + request_serializer=cluster_service.CreateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["create_cluster"] + + @property + def update_cluster( + self, + ) -> Callable[ + [cluster_service.UpdateClusterRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the update cluster method over gRPC. + + Updates the settings of a specific cluster. 
+ + Returns: + Callable[[~.UpdateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/UpdateCluster", + request_serializer=cluster_service.UpdateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["update_cluster"] + + @property + def update_node_pool( + self, + ) -> Callable[ + [cluster_service.UpdateNodePoolRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the update node pool method over gRPC. + + Updates the version and/or image type for the + specified node pool. + + Returns: + Callable[[~.UpdateNodePoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_node_pool" not in self._stubs: + self._stubs["update_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/UpdateNodePool", + request_serializer=cluster_service.UpdateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["update_node_pool"] + + @property + def set_node_pool_autoscaling( + self, + ) -> Callable[ + [cluster_service.SetNodePoolAutoscalingRequest], + Awaitable[cluster_service.Operation], + ]: + r"""Return a callable for the set node pool autoscaling method over gRPC. + + Sets the autoscaling settings for the specified node + pool. 
+ + Returns: + Callable[[~.SetNodePoolAutoscalingRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_node_pool_autoscaling" not in self._stubs: + self._stubs["set_node_pool_autoscaling"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetNodePoolAutoscaling", + request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_node_pool_autoscaling"] + + @property + def set_logging_service( + self, + ) -> Callable[ + [cluster_service.SetLoggingServiceRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set logging service method over gRPC. + + Sets the logging service for a specific cluster. + + Returns: + Callable[[~.SetLoggingServiceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_logging_service" not in self._stubs: + self._stubs["set_logging_service"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetLoggingService", + request_serializer=cluster_service.SetLoggingServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_logging_service"] + + @property + def set_monitoring_service( + self, + ) -> Callable[ + [cluster_service.SetMonitoringServiceRequest], + Awaitable[cluster_service.Operation], + ]: + r"""Return a callable for the set monitoring service method over gRPC. 
+ + Sets the monitoring service for a specific cluster. + + Returns: + Callable[[~.SetMonitoringServiceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_monitoring_service" not in self._stubs: + self._stubs["set_monitoring_service"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetMonitoringService", + request_serializer=cluster_service.SetMonitoringServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_monitoring_service"] + + @property + def set_addons_config( + self, + ) -> Callable[ + [cluster_service.SetAddonsConfigRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set addons config method over gRPC. + + Sets the addons for a specific cluster. + + Returns: + Callable[[~.SetAddonsConfigRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_addons_config" not in self._stubs: + self._stubs["set_addons_config"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetAddonsConfig", + request_serializer=cluster_service.SetAddonsConfigRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_addons_config"] + + @property + def set_locations( + self, + ) -> Callable[ + [cluster_service.SetLocationsRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set locations method over gRPC. 
+ + Sets the locations for a specific cluster. + + Returns: + Callable[[~.SetLocationsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_locations" not in self._stubs: + self._stubs["set_locations"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetLocations", + request_serializer=cluster_service.SetLocationsRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_locations"] + + @property + def update_master( + self, + ) -> Callable[ + [cluster_service.UpdateMasterRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the update master method over gRPC. + + Updates the master for a specific cluster. + + Returns: + Callable[[~.UpdateMasterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_master" not in self._stubs: + self._stubs["update_master"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/UpdateMaster", + request_serializer=cluster_service.UpdateMasterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["update_master"] + + @property + def set_master_auth( + self, + ) -> Callable[ + [cluster_service.SetMasterAuthRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set master auth method over gRPC. + + Sets master auth materials. 
Currently supports + changing the admin password or a specific cluster, + either via password generation or explicitly setting the + password. + + Returns: + Callable[[~.SetMasterAuthRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_master_auth" not in self._stubs: + self._stubs["set_master_auth"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetMasterAuth", + request_serializer=cluster_service.SetMasterAuthRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_master_auth"] + + @property + def delete_cluster( + self, + ) -> Callable[ + [cluster_service.DeleteClusterRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes the cluster, including the Kubernetes + endpoint and all worker nodes. + + Firewalls and routes that were configured during cluster + creation are also deleted. + + Other Google Compute Engine resources that might be in + use by the cluster, such as load balancer resources, are + not deleted if they weren't present when the cluster was + initially created. + + Returns: + Callable[[~.DeleteClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/DeleteCluster", + request_serializer=cluster_service.DeleteClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["delete_cluster"] + + @property + def list_operations( + self, + ) -> Callable[ + [cluster_service.ListOperationsRequest], + Awaitable[cluster_service.ListOperationsResponse], + ]: + r"""Return a callable for the list operations method over gRPC. + + Lists all operations in a project in a specific zone + or all zones. + + Returns: + Callable[[~.ListOperationsRequest], + Awaitable[~.ListOperationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/ListOperations", + request_serializer=cluster_service.ListOperationsRequest.serialize, + response_deserializer=cluster_service.ListOperationsResponse.deserialize, + ) + return self._stubs["list_operations"] + + @property + def get_operation( + self, + ) -> Callable[ + [cluster_service.GetOperationRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the get operation method over gRPC. + + Gets the specified operation. + + Returns: + Callable[[~.GetOperationRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/GetOperation", + request_serializer=cluster_service.GetOperationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["get_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[cluster_service.CancelOperationRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the cancel operation method over gRPC. + + Cancels the specified operation. + + Returns: + Callable[[~.CancelOperationRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/CancelOperation", + request_serializer=cluster_service.CancelOperationRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["cancel_operation"] + + @property + def get_server_config( + self, + ) -> Callable[ + [cluster_service.GetServerConfigRequest], + Awaitable[cluster_service.ServerConfig], + ]: + r"""Return a callable for the get server config method over gRPC. + + Returns configuration info about the Google + Kubernetes Engine service. + + Returns: + Callable[[~.GetServerConfigRequest], + Awaitable[~.ServerConfig]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_server_config" not in self._stubs: + self._stubs["get_server_config"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/GetServerConfig", + request_serializer=cluster_service.GetServerConfigRequest.serialize, + response_deserializer=cluster_service.ServerConfig.deserialize, + ) + return self._stubs["get_server_config"] + + @property + def list_node_pools( + self, + ) -> Callable[ + [cluster_service.ListNodePoolsRequest], + Awaitable[cluster_service.ListNodePoolsResponse], + ]: + r"""Return a callable for the list node pools method over gRPC. + + Lists the node pools for a cluster. + + Returns: + Callable[[~.ListNodePoolsRequest], + Awaitable[~.ListNodePoolsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_node_pools" not in self._stubs: + self._stubs["list_node_pools"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/ListNodePools", + request_serializer=cluster_service.ListNodePoolsRequest.serialize, + response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, + ) + return self._stubs["list_node_pools"] + + @property + def get_node_pool( + self, + ) -> Callable[ + [cluster_service.GetNodePoolRequest], Awaitable[cluster_service.NodePool] + ]: + r"""Return a callable for the get node pool method over gRPC. + + Retrieves the requested node pool. + + Returns: + Callable[[~.GetNodePoolRequest], + Awaitable[~.NodePool]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_node_pool" not in self._stubs: + self._stubs["get_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/GetNodePool", + request_serializer=cluster_service.GetNodePoolRequest.serialize, + response_deserializer=cluster_service.NodePool.deserialize, + ) + return self._stubs["get_node_pool"] + + @property + def create_node_pool( + self, + ) -> Callable[ + [cluster_service.CreateNodePoolRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the create node pool method over gRPC. + + Creates a node pool for a cluster. + + Returns: + Callable[[~.CreateNodePoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_node_pool" not in self._stubs: + self._stubs["create_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/CreateNodePool", + request_serializer=cluster_service.CreateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["create_node_pool"] + + @property + def delete_node_pool( + self, + ) -> Callable[ + [cluster_service.DeleteNodePoolRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the delete node pool method over gRPC. + + Deletes a node pool from a cluster. + + Returns: + Callable[[~.DeleteNodePoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_node_pool" not in self._stubs: + self._stubs["delete_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/DeleteNodePool", + request_serializer=cluster_service.DeleteNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["delete_node_pool"] + + @property + def rollback_node_pool_upgrade( + self, + ) -> Callable[ + [cluster_service.RollbackNodePoolUpgradeRequest], + Awaitable[cluster_service.Operation], + ]: + r"""Return a callable for the rollback node pool upgrade method over gRPC. + + Rolls back a previously Aborted or Failed NodePool + upgrade. This makes no changes if the last upgrade + successfully completed. + + Returns: + Callable[[~.RollbackNodePoolUpgradeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "rollback_node_pool_upgrade" not in self._stubs: + self._stubs["rollback_node_pool_upgrade"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/RollbackNodePoolUpgrade", + request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["rollback_node_pool_upgrade"] + + @property + def set_node_pool_management( + self, + ) -> Callable[ + [cluster_service.SetNodePoolManagementRequest], + Awaitable[cluster_service.Operation], + ]: + r"""Return a callable for the set node pool management method over gRPC. + + Sets the NodeManagement options for a node pool. + + Returns: + Callable[[~.SetNodePoolManagementRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_node_pool_management" not in self._stubs: + self._stubs["set_node_pool_management"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetNodePoolManagement", + request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_node_pool_management"] + + @property + def set_labels( + self, + ) -> Callable[ + [cluster_service.SetLabelsRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set labels method over gRPC. + + Sets labels on a cluster. + + Returns: + Callable[[~.SetLabelsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_labels" not in self._stubs: + self._stubs["set_labels"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetLabels", + request_serializer=cluster_service.SetLabelsRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_labels"] + + @property + def set_legacy_abac( + self, + ) -> Callable[ + [cluster_service.SetLegacyAbacRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set legacy abac method over gRPC. + + Enables or disables the ABAC authorization mechanism + on a cluster. + + Returns: + Callable[[~.SetLegacyAbacRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_legacy_abac" not in self._stubs: + self._stubs["set_legacy_abac"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetLegacyAbac", + request_serializer=cluster_service.SetLegacyAbacRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_legacy_abac"] + + @property + def start_ip_rotation( + self, + ) -> Callable[ + [cluster_service.StartIPRotationRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the start ip rotation method over gRPC. + + Starts master IP rotation. + + Returns: + Callable[[~.StartIPRotationRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "start_ip_rotation" not in self._stubs: + self._stubs["start_ip_rotation"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/StartIPRotation", + request_serializer=cluster_service.StartIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["start_ip_rotation"] + + @property + def complete_ip_rotation( + self, + ) -> Callable[ + [cluster_service.CompleteIPRotationRequest], + Awaitable[cluster_service.Operation], + ]: + r"""Return a callable for the complete ip rotation method over gRPC. + + Completes master IP rotation. + + Returns: + Callable[[~.CompleteIPRotationRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "complete_ip_rotation" not in self._stubs: + self._stubs["complete_ip_rotation"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/CompleteIPRotation", + request_serializer=cluster_service.CompleteIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["complete_ip_rotation"] + + @property + def set_node_pool_size( + self, + ) -> Callable[ + [cluster_service.SetNodePoolSizeRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set node pool size method over gRPC. + + Sets the size for a specific node pool. + + Returns: + Callable[[~.SetNodePoolSizeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_node_pool_size" not in self._stubs: + self._stubs["set_node_pool_size"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetNodePoolSize", + request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_node_pool_size"] + + @property + def set_network_policy( + self, + ) -> Callable[ + [cluster_service.SetNetworkPolicyRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set network policy method over gRPC. + + Enables or disables Network Policy for a cluster. + + Returns: + Callable[[~.SetNetworkPolicyRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_network_policy" not in self._stubs: + self._stubs["set_network_policy"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetNetworkPolicy", + request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_network_policy"] + + @property + def set_maintenance_policy( + self, + ) -> Callable[ + [cluster_service.SetMaintenancePolicyRequest], + Awaitable[cluster_service.Operation], + ]: + r"""Return a callable for the set maintenance policy method over gRPC. + + Sets the maintenance policy for a cluster. + + Returns: + Callable[[~.SetMaintenancePolicyRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_maintenance_policy" not in self._stubs: + self._stubs["set_maintenance_policy"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/SetMaintenancePolicy", + request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_maintenance_policy"] + + @property + def list_usable_subnetworks( + self, + ) -> Callable[ + [cluster_service.ListUsableSubnetworksRequest], + Awaitable[cluster_service.ListUsableSubnetworksResponse], + ]: + r"""Return a callable for the list usable subnetworks method over gRPC. + + Lists subnetworks that are usable for creating + clusters in a project. 
+ + Returns: + Callable[[~.ListUsableSubnetworksRequest], + Awaitable[~.ListUsableSubnetworksResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_usable_subnetworks" not in self._stubs: + self._stubs["list_usable_subnetworks"] = self.grpc_channel.unary_unary( + "/google.container.v1.ClusterManager/ListUsableSubnetworks", + request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, + response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, + ) + return self._stubs["list_usable_subnetworks"] + + +__all__ = ("ClusterManagerGrpcAsyncIOTransport",) diff --git a/google/cloud/container_v1/types.py b/google/cloud/container_v1/types.py deleted file mode 100644 index 77615ac4..00000000 --- a/google/cloud/container_v1/types.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.container_v1.proto import cluster_service_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import timestamp_pb2 - - -_shared_modules = [ - empty_pb2, - timestamp_pb2, -] - -_local_modules = [ - cluster_service_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.container_v1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/container_v1/types/__init__.py b/google/cloud/container_v1/types/__init__.py new file mode 100644 index 00000000..1ab79887 --- /dev/null +++ b/google/cloud/container_v1/types/__init__.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .cluster_service import ( + NodeConfig, + ShieldedInstanceConfig, + NodeTaint, + MasterAuth, + ClientCertificateConfig, + AddonsConfig, + HttpLoadBalancing, + HorizontalPodAutoscaling, + KubernetesDashboard, + NetworkPolicyConfig, + PrivateClusterConfig, + AuthenticatorGroupsConfig, + CloudRunConfig, + MasterAuthorizedNetworksConfig, + LegacyAbac, + NetworkPolicy, + BinaryAuthorization, + IPAllocationPolicy, + Cluster, + ClusterUpdate, + Operation, + CreateClusterRequest, + GetClusterRequest, + UpdateClusterRequest, + UpdateNodePoolRequest, + SetNodePoolAutoscalingRequest, + SetLoggingServiceRequest, + SetMonitoringServiceRequest, + SetAddonsConfigRequest, + SetLocationsRequest, + UpdateMasterRequest, + SetMasterAuthRequest, + DeleteClusterRequest, + ListClustersRequest, + ListClustersResponse, + GetOperationRequest, + ListOperationsRequest, + CancelOperationRequest, + ListOperationsResponse, + GetServerConfigRequest, + ServerConfig, + CreateNodePoolRequest, + DeleteNodePoolRequest, + ListNodePoolsRequest, + GetNodePoolRequest, + NodePool, + NodeManagement, + AutoUpgradeOptions, + MaintenancePolicy, + MaintenanceWindow, + TimeWindow, + RecurringTimeWindow, + DailyMaintenanceWindow, + SetNodePoolManagementRequest, + SetNodePoolSizeRequest, + RollbackNodePoolUpgradeRequest, + ListNodePoolsResponse, + ClusterAutoscaling, + AutoprovisioningNodePoolDefaults, + ResourceLimit, + NodePoolAutoscaling, + SetLabelsRequest, + SetLegacyAbacRequest, + StartIPRotationRequest, + CompleteIPRotationRequest, + AcceleratorConfig, + SetNetworkPolicyRequest, + SetMaintenancePolicyRequest, + StatusCondition, + NetworkConfig, + IntraNodeVisibilityConfig, + MaxPodsConstraint, + DatabaseEncryption, + ListUsableSubnetworksRequest, + ListUsableSubnetworksResponse, + UsableSubnetworkSecondaryRange, + UsableSubnetwork, + ResourceUsageExportConfig, + VerticalPodAutoscaling, +) + + +__all__ = ( + "NodeConfig", + "ShieldedInstanceConfig", + "NodeTaint", + "MasterAuth", + 
"ClientCertificateConfig", + "AddonsConfig", + "HttpLoadBalancing", + "HorizontalPodAutoscaling", + "KubernetesDashboard", + "NetworkPolicyConfig", + "PrivateClusterConfig", + "AuthenticatorGroupsConfig", + "CloudRunConfig", + "MasterAuthorizedNetworksConfig", + "LegacyAbac", + "NetworkPolicy", + "BinaryAuthorization", + "IPAllocationPolicy", + "Cluster", + "ClusterUpdate", + "Operation", + "CreateClusterRequest", + "GetClusterRequest", + "UpdateClusterRequest", + "UpdateNodePoolRequest", + "SetNodePoolAutoscalingRequest", + "SetLoggingServiceRequest", + "SetMonitoringServiceRequest", + "SetAddonsConfigRequest", + "SetLocationsRequest", + "UpdateMasterRequest", + "SetMasterAuthRequest", + "DeleteClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "GetOperationRequest", + "ListOperationsRequest", + "CancelOperationRequest", + "ListOperationsResponse", + "GetServerConfigRequest", + "ServerConfig", + "CreateNodePoolRequest", + "DeleteNodePoolRequest", + "ListNodePoolsRequest", + "GetNodePoolRequest", + "NodePool", + "NodeManagement", + "AutoUpgradeOptions", + "MaintenancePolicy", + "MaintenanceWindow", + "TimeWindow", + "RecurringTimeWindow", + "DailyMaintenanceWindow", + "SetNodePoolManagementRequest", + "SetNodePoolSizeRequest", + "RollbackNodePoolUpgradeRequest", + "ListNodePoolsResponse", + "ClusterAutoscaling", + "AutoprovisioningNodePoolDefaults", + "ResourceLimit", + "NodePoolAutoscaling", + "SetLabelsRequest", + "SetLegacyAbacRequest", + "StartIPRotationRequest", + "CompleteIPRotationRequest", + "AcceleratorConfig", + "SetNetworkPolicyRequest", + "SetMaintenancePolicyRequest", + "StatusCondition", + "NetworkConfig", + "IntraNodeVisibilityConfig", + "MaxPodsConstraint", + "DatabaseEncryption", + "ListUsableSubnetworksRequest", + "ListUsableSubnetworksResponse", + "UsableSubnetworkSecondaryRange", + "UsableSubnetwork", + "ResourceUsageExportConfig", + "VerticalPodAutoscaling", +) diff --git a/google/cloud/container_v1/types/cluster_service.py 
b/google/cloud/container_v1/types/cluster_service.py new file mode 100644 index 00000000..6ae45902 --- /dev/null +++ b/google/cloud/container_v1/types/cluster_service.py @@ -0,0 +1,3415 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.container.v1", + manifest={ + "NodeConfig", + "ShieldedInstanceConfig", + "NodeTaint", + "MasterAuth", + "ClientCertificateConfig", + "AddonsConfig", + "HttpLoadBalancing", + "HorizontalPodAutoscaling", + "KubernetesDashboard", + "NetworkPolicyConfig", + "PrivateClusterConfig", + "AuthenticatorGroupsConfig", + "CloudRunConfig", + "MasterAuthorizedNetworksConfig", + "LegacyAbac", + "NetworkPolicy", + "BinaryAuthorization", + "IPAllocationPolicy", + "Cluster", + "ClusterUpdate", + "Operation", + "CreateClusterRequest", + "GetClusterRequest", + "UpdateClusterRequest", + "UpdateNodePoolRequest", + "SetNodePoolAutoscalingRequest", + "SetLoggingServiceRequest", + "SetMonitoringServiceRequest", + "SetAddonsConfigRequest", + "SetLocationsRequest", + "UpdateMasterRequest", + "SetMasterAuthRequest", + "DeleteClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "GetOperationRequest", + "ListOperationsRequest", + "CancelOperationRequest", + "ListOperationsResponse", + "GetServerConfigRequest", + "ServerConfig", + 
"CreateNodePoolRequest", + "DeleteNodePoolRequest", + "ListNodePoolsRequest", + "GetNodePoolRequest", + "NodePool", + "NodeManagement", + "AutoUpgradeOptions", + "MaintenancePolicy", + "MaintenanceWindow", + "TimeWindow", + "RecurringTimeWindow", + "DailyMaintenanceWindow", + "SetNodePoolManagementRequest", + "SetNodePoolSizeRequest", + "RollbackNodePoolUpgradeRequest", + "ListNodePoolsResponse", + "ClusterAutoscaling", + "AutoprovisioningNodePoolDefaults", + "ResourceLimit", + "NodePoolAutoscaling", + "SetLabelsRequest", + "SetLegacyAbacRequest", + "StartIPRotationRequest", + "CompleteIPRotationRequest", + "AcceleratorConfig", + "SetNetworkPolicyRequest", + "SetMaintenancePolicyRequest", + "StatusCondition", + "NetworkConfig", + "IntraNodeVisibilityConfig", + "MaxPodsConstraint", + "DatabaseEncryption", + "ListUsableSubnetworksRequest", + "ListUsableSubnetworksResponse", + "UsableSubnetworkSecondaryRange", + "UsableSubnetwork", + "ResourceUsageExportConfig", + "VerticalPodAutoscaling", + }, +) + + +class NodeConfig(proto.Message): + r"""Parameters that describe the nodes in a cluster. + + Attributes: + machine_type (str): + The name of a Google Compute Engine `machine + type `__ + (e.g. ``n1-standard-1``). + + If unspecified, the default machine type is + ``n1-standard-1``. + disk_size_gb (int): + Size of the disk attached to each node, + specified in GB. The smallest allowed disk size + is 10GB. + If unspecified, the default disk size is 100GB. + oauth_scopes (Sequence[str]): + The set of Google API scopes to be made available on all of + the node VMs under the "default" service account. + + The following scopes are recommended, but not required, and + by default are not included: + + - ``https://www.googleapis.com/auth/compute`` is required + for mounting persistent storage on your nodes. + - ``https://www.googleapis.com/auth/devstorage.read_only`` + is required for communicating with **gcr.io** (the + `Google Container + Registry `__). 
+ + If unspecified, no scopes are added, unless Cloud Logging or + Cloud Monitoring are enabled, in which case their required + scopes will be added. + service_account (str): + The Google Cloud Platform Service Account to + be used by the node VMs. If no Service Account + is specified, the "default" service account is + used. + metadata (Sequence[~.cluster_service.NodeConfig.MetadataEntry]): + The metadata key/value pairs assigned to instances in the + cluster. + + Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less + than 128 bytes in length. These are reflected as part of a + URL in the metadata server. Additionally, to avoid + ambiguity, keys must not conflict with any other metadata + keys for the project or be one of the reserved keys: + "cluster-location" "cluster-name" "cluster-uid" + "configure-sh" "containerd-configure-sh" "enable-os-login" + "gci-update-strategy" "gci-ensure-gke-docker" + "instance-template" "kube-env" "startup-script" "user-data" + "disable-address-manager" "windows-startup-script-ps1" + "common-psm1" "k8s-node-setup-psm1" "install-ssh-psm1" + "user-profile-psm1" "serial-port-logging-enable" + + Values are free-form strings, and only have meaning as + interpreted by the image running in the instance. The only + restriction placed on them is that each value's size must be + less than or equal to 32 KB. + + The total size of all keys and values must be less than 512 + KB. + image_type (str): + The image type to use for this node. Note + that for a given image type, the latest version + of it will be used. + labels (Sequence[~.cluster_service.NodeConfig.LabelsEntry]): + The map of Kubernetes labels (key/value + pairs) to be applied to each node. These will + added in addition to any default label(s) that + Kubernetes may apply to the node. + In case of conflict in label keys, the applied + set may differ depending on the Kubernetes + version -- it's best to assume the behavior is + undefined and conflicts should be avoided. 
+ For more information, including usage and the + valid values, see: + https://kubernetes.io/docs/concepts/overview/working- + with-objects/labels/ + local_ssd_count (int): + The number of local SSD disks to be attached + to the node. + The limit for this value is dependent upon the + maximum number of disks available on a machine + per zone. See: + https://cloud.google.com/compute/docs/disks/local- + ssd for more information. + tags (Sequence[str]): + The list of instance tags applied to all + nodes. Tags are used to identify valid sources + or targets for network firewalls and are + specified by the client during cluster or node + pool creation. Each tag within the list must + comply with RFC1035. + preemptible (bool): + Whether the nodes are created as preemptible + VM instances. See: + https://cloud.google.com/compute/docs/instances/preemptible + for more information about preemptible VM + instances. + accelerators (Sequence[~.cluster_service.AcceleratorConfig]): + A list of hardware accelerators to be + attached to each node. See + https://cloud.google.com/compute/docs/gpus for + more information about support for GPUs. + disk_type (str): + Type of the disk attached to each node (e.g. + 'pd-standard' or 'pd-ssd') + If unspecified, the default disk type is 'pd- + standard' + min_cpu_platform (str): + Minimum CPU platform to be used by this instance. The + instance may be scheduled on the specified or newer CPU + platform. Applicable values are the friendly names of CPU + platforms, such as minCpuPlatform: "Intel Haswell" or + minCpuPlatform: "Intel Sandy Bridge". For more information, + read `how to specify min CPU + platform `__ + taints (Sequence[~.cluster_service.NodeTaint]): + List of kubernetes taints to be applied to + each node. 
+ For more information, including usage and the + valid values, see: + https://kubernetes.io/docs/concepts/configuration/taint- + and-toleration/ + shielded_instance_config (~.cluster_service.ShieldedInstanceConfig): + Shielded Instance options. + """ + + machine_type = proto.Field(proto.STRING, number=1) + + disk_size_gb = proto.Field(proto.INT32, number=2) + + oauth_scopes = proto.RepeatedField(proto.STRING, number=3) + + service_account = proto.Field(proto.STRING, number=9) + + metadata = proto.MapField(proto.STRING, proto.STRING, number=4) + + image_type = proto.Field(proto.STRING, number=5) + + labels = proto.MapField(proto.STRING, proto.STRING, number=6) + + local_ssd_count = proto.Field(proto.INT32, number=7) + + tags = proto.RepeatedField(proto.STRING, number=8) + + preemptible = proto.Field(proto.BOOL, number=10) + + accelerators = proto.RepeatedField( + proto.MESSAGE, number=11, message="AcceleratorConfig", + ) + + disk_type = proto.Field(proto.STRING, number=12) + + min_cpu_platform = proto.Field(proto.STRING, number=13) + + taints = proto.RepeatedField(proto.MESSAGE, number=15, message="NodeTaint",) + + shielded_instance_config = proto.Field( + proto.MESSAGE, number=20, message="ShieldedInstanceConfig", + ) + + +class ShieldedInstanceConfig(proto.Message): + r"""A set of Shielded Instance options. + + Attributes: + enable_secure_boot (bool): + Defines whether the instance has Secure Boot + enabled. + Secure Boot helps ensure that the system only + runs authentic software by verifying the digital + signature of all boot components, and halting + the boot process if signature verification + fails. + enable_integrity_monitoring (bool): + Defines whether the instance has integrity + monitoring enabled. + Enables monitoring and attestation of the boot + integrity of the instance. The attestation is + performed against the integrity policy baseline. + This baseline is initially derived from the + implicitly trusted boot image when the instance + is created. 
+ """ + + enable_secure_boot = proto.Field(proto.BOOL, number=1) + + enable_integrity_monitoring = proto.Field(proto.BOOL, number=2) + + +class NodeTaint(proto.Message): + r"""Kubernetes taint is comprised of three fields: key, value, + and effect. Effect can only be one of three types: NoSchedule, + PreferNoSchedule or NoExecute. + For more information, including usage and the valid values, see: + https://kubernetes.io/docs/concepts/configuration/taint-and- + toleration/ + + Attributes: + key (str): + Key for taint. + value (str): + Value for taint. + effect (~.cluster_service.NodeTaint.Effect): + Effect for taint. + """ + + class Effect(proto.Enum): + r"""Possible values for Effect in taint.""" + EFFECT_UNSPECIFIED = 0 + NO_SCHEDULE = 1 + PREFER_NO_SCHEDULE = 2 + NO_EXECUTE = 3 + + key = proto.Field(proto.STRING, number=1) + + value = proto.Field(proto.STRING, number=2) + + effect = proto.Field(proto.ENUM, number=3, enum=Effect,) + + +class MasterAuth(proto.Message): + r"""The authentication information for accessing the master + endpoint. Authentication can be done using HTTP basic auth or + using client certificates. + + Attributes: + username (str): + The username to use for HTTP basic + authentication to the master endpoint. For + clusters v1.6.0 and later, basic authentication + can be disabled by leaving username unspecified + (or setting it to the empty string). + password (str): + The password to use for HTTP basic + authentication to the master endpoint. Because + the master endpoint is open to the Internet, you + should create a strong password. If a password + is provided for cluster creation, username must + be non-empty. + client_certificate_config (~.cluster_service.ClientCertificateConfig): + Configuration for client certificate + authentication on the cluster. For clusters + before v1.12, if no configuration is specified, + a client certificate is issued. 
+ cluster_ca_certificate (str): + [Output only] Base64-encoded public certificate that is the + root of trust for the cluster. + client_certificate (str): + [Output only] Base64-encoded public certificate used by + clients to authenticate to the cluster endpoint. + client_key (str): + [Output only] Base64-encoded private key used by clients to + authenticate to the cluster endpoint. + """ + + username = proto.Field(proto.STRING, number=1) + + password = proto.Field(proto.STRING, number=2) + + client_certificate_config = proto.Field( + proto.MESSAGE, number=3, message="ClientCertificateConfig", + ) + + cluster_ca_certificate = proto.Field(proto.STRING, number=100) + + client_certificate = proto.Field(proto.STRING, number=101) + + client_key = proto.Field(proto.STRING, number=102) + + +class ClientCertificateConfig(proto.Message): + r"""Configuration for client certificates on the cluster. + + Attributes: + issue_client_certificate (bool): + Issue a client certificate. + """ + + issue_client_certificate = proto.Field(proto.BOOL, number=1) + + +class AddonsConfig(proto.Message): + r"""Configuration for the addons that can be automatically spun + up in the cluster, enabling additional functionality. + + Attributes: + http_load_balancing (~.cluster_service.HttpLoadBalancing): + Configuration for the HTTP (L7) load + balancing controller addon, which makes it easy + to set up HTTP load balancers for services in a + cluster. + horizontal_pod_autoscaling (~.cluster_service.HorizontalPodAutoscaling): + Configuration for the horizontal pod + autoscaling feature, which increases or + decreases the number of replica pods a + replication controller has based on the resource + usage of the existing pods. + kubernetes_dashboard (~.cluster_service.KubernetesDashboard): + Configuration for the Kubernetes Dashboard. + This addon is deprecated, and will be disabled + in 1.15. 
It is recommended to use the Cloud + Console to manage and monitor your Kubernetes + clusters, workloads and applications. For more + information, see: + https://cloud.google.com/kubernetes- + engine/docs/concepts/dashboards + network_policy_config (~.cluster_service.NetworkPolicyConfig): + Configuration for NetworkPolicy. This only + tracks whether the addon is enabled or not on + the Master, it does not track whether network + policy is enabled for the nodes. + cloud_run_config (~.cluster_service.CloudRunConfig): + Configuration for the Cloud Run addon, which + allows the user to use a managed Knative + service. + """ + + http_load_balancing = proto.Field( + proto.MESSAGE, number=1, message="HttpLoadBalancing", + ) + + horizontal_pod_autoscaling = proto.Field( + proto.MESSAGE, number=2, message="HorizontalPodAutoscaling", + ) + + kubernetes_dashboard = proto.Field( + proto.MESSAGE, number=3, message="KubernetesDashboard", + ) + + network_policy_config = proto.Field( + proto.MESSAGE, number=4, message="NetworkPolicyConfig", + ) + + cloud_run_config = proto.Field(proto.MESSAGE, number=7, message="CloudRunConfig",) + + +class HttpLoadBalancing(proto.Message): + r"""Configuration options for the HTTP (L7) load balancing + controller addon, which makes it easy to set up HTTP load + balancers for services in a cluster. + + Attributes: + disabled (bool): + Whether the HTTP Load Balancing controller is + enabled in the cluster. When enabled, it runs a + small pod in the cluster that manages the load + balancers. + """ + + disabled = proto.Field(proto.BOOL, number=1) + + +class HorizontalPodAutoscaling(proto.Message): + r"""Configuration options for the horizontal pod autoscaling + feature, which increases or decreases the number of replica pods + a replication controller has based on the resource usage of the + existing pods. + + Attributes: + disabled (bool): + Whether the Horizontal Pod Autoscaling + feature is enabled in the cluster. 
When enabled, + it ensures that a Heapster pod is running in the + cluster, which is also used by the Cloud + Monitoring service. + """ + + disabled = proto.Field(proto.BOOL, number=1) + + +class KubernetesDashboard(proto.Message): + r"""Configuration for the Kubernetes Dashboard. + + Attributes: + disabled (bool): + Whether the Kubernetes Dashboard is enabled + for this cluster. + """ + + disabled = proto.Field(proto.BOOL, number=1) + + +class NetworkPolicyConfig(proto.Message): + r"""Configuration for NetworkPolicy. This only tracks whether the + addon is enabled or not on the Master, it does not track whether + network policy is enabled for the nodes. + + Attributes: + disabled (bool): + Whether NetworkPolicy is enabled for this + cluster. + """ + + disabled = proto.Field(proto.BOOL, number=1) + + +class PrivateClusterConfig(proto.Message): + r"""Configuration options for private clusters. + + Attributes: + enable_private_nodes (bool): + Whether nodes have internal IP addresses + only. If enabled, all nodes are given only RFC + 1918 private addresses and communicate with the + master via private networking. + enable_private_endpoint (bool): + Whether the master's internal IP address is + used as the cluster endpoint. + master_ipv4_cidr_block (str): + The IP range in CIDR notation to use for the + hosted master network. This range will be used + for assigning internal IP addresses to the + master or set of masters, as well as the ILB + VIP. This range must not overlap with any other + ranges in use within the cluster's network. + private_endpoint (str): + Output only. The internal IP address of this + cluster's master endpoint. + public_endpoint (str): + Output only. The external IP address of this + cluster's master endpoint. 
+ """ + + enable_private_nodes = proto.Field(proto.BOOL, number=1) + + enable_private_endpoint = proto.Field(proto.BOOL, number=2) + + master_ipv4_cidr_block = proto.Field(proto.STRING, number=3) + + private_endpoint = proto.Field(proto.STRING, number=4) + + public_endpoint = proto.Field(proto.STRING, number=5) + + +class AuthenticatorGroupsConfig(proto.Message): + r"""Configuration for returning group information from + authenticators. + + Attributes: + enabled (bool): + Whether this cluster should return group + membership lookups during authentication using a + group of security groups. + security_group (str): + The name of the security group-of-groups to + be used. Only relevant if enabled = true. + """ + + enabled = proto.Field(proto.BOOL, number=1) + + security_group = proto.Field(proto.STRING, number=2) + + +class CloudRunConfig(proto.Message): + r"""Configuration options for the Cloud Run feature. + + Attributes: + disabled (bool): + Whether Cloud Run addon is enabled for this + cluster. + """ + + disabled = proto.Field(proto.BOOL, number=1) + + +class MasterAuthorizedNetworksConfig(proto.Message): + r"""Configuration options for the master authorized networks + feature. Enabled master authorized networks will disallow all + external traffic to access Kubernetes master through HTTPS + except traffic from the given CIDR blocks, Google Compute Engine + Public IPs and Google Prod IPs. + + Attributes: + enabled (bool): + Whether or not master authorized networks is + enabled. + cidr_blocks (Sequence[~.cluster_service.MasterAuthorizedNetworksConfig.CidrBlock]): + cidr_blocks define up to 50 external networks that could + access Kubernetes master through HTTPS. + """ + + class CidrBlock(proto.Message): + r"""CidrBlock contains an optional name and one CIDR block. + + Attributes: + display_name (str): + display_name is an optional field for users to identify CIDR + blocks. + cidr_block (str): + cidr_block must be specified in CIDR notation. 
+ """ + + display_name = proto.Field(proto.STRING, number=1) + + cidr_block = proto.Field(proto.STRING, number=2) + + enabled = proto.Field(proto.BOOL, number=1) + + cidr_blocks = proto.RepeatedField(proto.MESSAGE, number=2, message=CidrBlock,) + + +class LegacyAbac(proto.Message): + r"""Configuration for the legacy Attribute Based Access Control + authorization mode. + + Attributes: + enabled (bool): + Whether the ABAC authorizer is enabled for + this cluster. When enabled, identities in the + system, including service accounts, nodes, and + controllers, will have statically granted + permissions beyond those provided by the RBAC + configuration or IAM. + """ + + enabled = proto.Field(proto.BOOL, number=1) + + +class NetworkPolicy(proto.Message): + r"""Configuration options for the NetworkPolicy feature. + https://kubernetes.io/docs/concepts/services- + networking/networkpolicies/ + + Attributes: + provider (~.cluster_service.NetworkPolicy.Provider): + The selected network policy provider. + enabled (bool): + Whether network policy is enabled on the + cluster. + """ + + class Provider(proto.Enum): + r"""Allowed Network Policy providers.""" + PROVIDER_UNSPECIFIED = 0 + CALICO = 1 + + provider = proto.Field(proto.ENUM, number=1, enum=Provider,) + + enabled = proto.Field(proto.BOOL, number=2) + + +class BinaryAuthorization(proto.Message): + r"""Configuration for Binary Authorization. + + Attributes: + enabled (bool): + Enable Binary Authorization for this cluster. + If enabled, all container images will be + validated by Binary Authorization. + """ + + enabled = proto.Field(proto.BOOL, number=1) + + +class IPAllocationPolicy(proto.Message): + r"""Configuration for controlling how IPs are allocated in the + cluster. + + Attributes: + use_ip_aliases (bool): + Whether alias IPs will be used for pod IPs in + the cluster. + create_subnetwork (bool): + Whether a new subnetwork will be created automatically for + the cluster. 
+ + This field is only applicable when ``use_ip_aliases`` is + true. + subnetwork_name (str): + A custom subnetwork name to be used if ``create_subnetwork`` + is true. If this field is empty, then an automatic name will + be chosen for the new subnetwork. + cluster_ipv4_cidr (str): + This field is deprecated, use cluster_ipv4_cidr_block. + node_ipv4_cidr (str): + This field is deprecated, use node_ipv4_cidr_block. + services_ipv4_cidr (str): + This field is deprecated, use services_ipv4_cidr_block. + cluster_secondary_range_name (str): + The name of the secondary range to be used for the cluster + CIDR block. The secondary range will be used for pod IP + addresses. This must be an existing secondary range + associated with the cluster subnetwork. + + This field is only applicable with use_ip_aliases is true + and create_subnetwork is false. + services_secondary_range_name (str): + The name of the secondary range to be used as for the + services CIDR block. The secondary range will be used for + service ClusterIPs. This must be an existing secondary range + associated with the cluster subnetwork. + + This field is only applicable with use_ip_aliases is true + and create_subnetwork is false. + cluster_ipv4_cidr_block (str): + The IP address range for the cluster pod IPs. If this field + is set, then ``cluster.cluster_ipv4_cidr`` must be left + blank. + + This field is only applicable when ``use_ip_aliases`` is + true. + + Set to blank to have a range chosen with the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + node_ipv4_cidr_block (str): + The IP address range of the instance IPs in this cluster. + + This is applicable only if ``create_subnetwork`` is true. + + Set to blank to have a range chosen with the default size. 
+ + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + services_ipv4_cidr_block (str): + The IP address range of the services IPs in this cluster. If + blank, a range will be automatically chosen with the default + size. + + This field is only applicable when ``use_ip_aliases`` is + true. + + Set to blank to have a range chosen with the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + tpu_ipv4_cidr_block (str): + The IP address range of the Cloud TPUs in this cluster. If + unspecified, a range will be automatically chosen with the + default size. + + This field is only applicable when ``use_ip_aliases`` is + true. + + If unspecified, the range will use the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. 
+ """ + + use_ip_aliases = proto.Field(proto.BOOL, number=1) + + create_subnetwork = proto.Field(proto.BOOL, number=2) + + subnetwork_name = proto.Field(proto.STRING, number=3) + + cluster_ipv4_cidr = proto.Field(proto.STRING, number=4) + + node_ipv4_cidr = proto.Field(proto.STRING, number=5) + + services_ipv4_cidr = proto.Field(proto.STRING, number=6) + + cluster_secondary_range_name = proto.Field(proto.STRING, number=7) + + services_secondary_range_name = proto.Field(proto.STRING, number=8) + + cluster_ipv4_cidr_block = proto.Field(proto.STRING, number=9) + + node_ipv4_cidr_block = proto.Field(proto.STRING, number=10) + + services_ipv4_cidr_block = proto.Field(proto.STRING, number=11) + + tpu_ipv4_cidr_block = proto.Field(proto.STRING, number=13) + + +class Cluster(proto.Message): + r"""A Google Kubernetes Engine cluster. + + Attributes: + name (str): + The name of this cluster. The name must be unique within + this project and location (e.g. zone or region), and can be + up to 40 characters with the following restrictions: + + - Lowercase letters, numbers, and hyphens only. + - Must start with a letter. + - Must end with a number or a letter. + description (str): + An optional description of this cluster. + initial_node_count (int): + The number of nodes to create in this cluster. You must + ensure that your Compute Engine `resource + quota `__ is + sufficient for this number of instances. You must also have + available firewall and routes quota. For requests, this + field should only be used in lieu of a "node_pool" object, + since this configuration (along with the "node_config") will + be used to create a "NodePool" object with an auto-generated + name. Do not use this and a node_pool at the same time. + + This field is deprecated, use node_pool.initial_node_count + instead. + node_config (~.cluster_service.NodeConfig): + Parameters used in creating the cluster's nodes. 
For + requests, this field should only be used in lieu of a + "node_pool" object, since this configuration (along with the + "initial_node_count") will be used to create a "NodePool" + object with an auto-generated name. Do not use this and a + node_pool at the same time. For responses, this field will + be populated with the node configuration of the first node + pool. (For configuration of each node pool, see + ``node_pool.config``) + + If unspecified, the defaults are used. This field is + deprecated, use node_pool.config instead. + master_auth (~.cluster_service.MasterAuth): + The authentication information for accessing the master + endpoint. If unspecified, the defaults are used: For + clusters before v1.12, if master_auth is unspecified, + ``username`` will be set to "admin", a random password will + be generated, and a client certificate will be issued. + logging_service (str): + The logging service the cluster should use to write logs. + Currently available options: + + - "logging.googleapis.com/kubernetes" - the Google Cloud + Logging service with Kubernetes-native resource model + - ``logging.googleapis.com`` - the Google Cloud Logging + service. + - ``none`` - no logs will be exported from the cluster. + - if left as an empty string,\ ``logging.googleapis.com`` + will be used. + monitoring_service (str): + The monitoring service the cluster should use to write + metrics. Currently available options: + + - ``monitoring.googleapis.com`` - the Google Cloud + Monitoring service. + - ``none`` - no metrics will be exported from the cluster. + - if left as an empty string, ``monitoring.googleapis.com`` + will be used. + network (str): + The name of the Google Compute Engine + `network `__ + to which the cluster is connected. If left unspecified, the + ``default`` network will be used. + cluster_ipv4_cidr (str): + The IP address range of the container pods in this cluster, + in + `CIDR `__ + notation (e.g. ``10.96.0.0/14``). 
Leave blank to have one + automatically chosen or specify a ``/14`` block in + ``10.0.0.0/8``. + addons_config (~.cluster_service.AddonsConfig): + Configurations for the various addons + available to run in the cluster. + subnetwork (str): + The name of the Google Compute Engine + `subnetwork `__ + to which the cluster is connected. + node_pools (Sequence[~.cluster_service.NodePool]): + The node pools associated with this cluster. This field + should not be set if "node_config" or "initial_node_count" + are specified. + locations (Sequence[str]): + The list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. + enable_kubernetes_alpha (bool): + Kubernetes alpha features are enabled on this + cluster. This includes alpha API groups (e.g. + v1alpha1) and features that may not be + production ready in the kubernetes version of + the master and nodes. The cluster has no SLA for + uptime and master/node upgrades are disabled. + Alpha enabled clusters are automatically deleted + thirty days after creation. + resource_labels (Sequence[~.cluster_service.Cluster.ResourceLabelsEntry]): + The resource labels for the cluster to use to + annotate any related Google Compute Engine + resources. + label_fingerprint (str): + The fingerprint of the set of labels for this + cluster. + legacy_abac (~.cluster_service.LegacyAbac): + Configuration for the legacy ABAC + authorization mode. + network_policy (~.cluster_service.NetworkPolicy): + Configuration options for the NetworkPolicy + feature. + ip_allocation_policy (~.cluster_service.IPAllocationPolicy): + Configuration for cluster IP allocation. + master_authorized_networks_config (~.cluster_service.MasterAuthorizedNetworksConfig): + The configuration options for master + authorized networks feature. + maintenance_policy (~.cluster_service.MaintenancePolicy): + Configure the maintenance policy for this + cluster. 
+ binary_authorization (~.cluster_service.BinaryAuthorization): + Configuration for Binary Authorization. + autoscaling (~.cluster_service.ClusterAutoscaling): + Cluster-level autoscaling configuration. + network_config (~.cluster_service.NetworkConfig): + Configuration for cluster networking. + default_max_pods_constraint (~.cluster_service.MaxPodsConstraint): + The default constraint on the maximum number + of pods that can be run simultaneously on a node + in the node pool of this cluster. Only honored + if cluster created with IP Alias support. + resource_usage_export_config (~.cluster_service.ResourceUsageExportConfig): + Configuration for exporting resource usages. + Resource usage export is disabled when this + config is unspecified. + authenticator_groups_config (~.cluster_service.AuthenticatorGroupsConfig): + Configuration controlling RBAC group + membership information. + private_cluster_config (~.cluster_service.PrivateClusterConfig): + Configuration for private cluster. + database_encryption (~.cluster_service.DatabaseEncryption): + Configuration of etcd encryption. + vertical_pod_autoscaling (~.cluster_service.VerticalPodAutoscaling): + Cluster-level Vertical Pod Autoscaling + configuration. + self_link (str): + [Output only] Server-defined URL for the resource. + zone (str): + [Output only] The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field is deprecated, use + location instead. + endpoint (str): + [Output only] The IP address of this cluster's master + endpoint. The endpoint can be accessed from the internet at + ``https://username:password@endpoint/``. + + See the ``masterAuth`` property of this resource for + username and password information. + initial_cluster_version (str): + The initial Kubernetes version for this + cluster. Valid versions are those found in + validMasterVersions returned by getServerConfig. 
+ The version can be upgraded over time; such + upgrades are reflected in currentMasterVersion + and currentNodeVersion. + + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "","-": picks the default + Kubernetes version + current_master_version (str): + [Output only] The current software version of the master + endpoint. + current_node_version (str): + [Output only] Deprecated, use + `NodePools.version `__ + instead. The current version of the node software + components. If they are currently at multiple versions + because they're in the process of being upgraded, this + reflects the minimum version of all nodes. + create_time (str): + [Output only] The time the cluster was created, in + `RFC3339 `__ text + format. + status (~.cluster_service.Cluster.Status): + [Output only] The current status of this cluster. + status_message (str): + [Output only] Additional information about the current + status of this cluster, if available. + node_ipv4_cidr_size (int): + [Output only] The size of the address space on each node for + hosting containers. This is provisioned from within the + ``container_ipv4_cidr`` range. This field will only be set + when cluster is in route-based network mode. + services_ipv4_cidr (str): + [Output only] The IP address range of the Kubernetes + services in this cluster, in + `CIDR `__ + notation (e.g. ``1.2.3.4/29``). Service addresses are + typically put in the last ``/16`` from the container CIDR. + instance_group_urls (Sequence[str]): + Deprecated. Use node_pools.instance_group_urls. + current_node_count (int): + [Output only] The number of nodes currently in the cluster. + Deprecated. 
Call Kubernetes API directly to retrieve node + information. + expire_time (str): + [Output only] The time the cluster will be automatically + deleted in + `RFC3339 `__ text + format. + location (str): + [Output only] The name of the Google Compute Engine + `zone `__ + or + `region `__ + in which the cluster resides. + enable_tpu (bool): + Enable the ability to use Cloud TPUs in this + cluster. + tpu_ipv4_cidr_block (str): + [Output only] The IP address range of the Cloud TPUs in this + cluster, in + `CIDR `__ + notation (e.g. ``1.2.3.4/29``). + conditions (Sequence[~.cluster_service.StatusCondition]): + Which conditions caused the current cluster + state. + """ + + class Status(proto.Enum): + r"""The current status of the cluster.""" + STATUS_UNSPECIFIED = 0 + PROVISIONING = 1 + RUNNING = 2 + RECONCILING = 3 + STOPPING = 4 + ERROR = 5 + DEGRADED = 6 + + name = proto.Field(proto.STRING, number=1) + + description = proto.Field(proto.STRING, number=2) + + initial_node_count = proto.Field(proto.INT32, number=3) + + node_config = proto.Field(proto.MESSAGE, number=4, message=NodeConfig,) + + master_auth = proto.Field(proto.MESSAGE, number=5, message=MasterAuth,) + + logging_service = proto.Field(proto.STRING, number=6) + + monitoring_service = proto.Field(proto.STRING, number=7) + + network = proto.Field(proto.STRING, number=8) + + cluster_ipv4_cidr = proto.Field(proto.STRING, number=9) + + addons_config = proto.Field(proto.MESSAGE, number=10, message=AddonsConfig,) + + subnetwork = proto.Field(proto.STRING, number=11) + + node_pools = proto.RepeatedField(proto.MESSAGE, number=12, message="NodePool",) + + locations = proto.RepeatedField(proto.STRING, number=13) + + enable_kubernetes_alpha = proto.Field(proto.BOOL, number=14) + + resource_labels = proto.MapField(proto.STRING, proto.STRING, number=15) + + label_fingerprint = proto.Field(proto.STRING, number=16) + + legacy_abac = proto.Field(proto.MESSAGE, number=18, message=LegacyAbac,) + + network_policy = 
proto.Field(proto.MESSAGE, number=19, message=NetworkPolicy,) + + ip_allocation_policy = proto.Field( + proto.MESSAGE, number=20, message=IPAllocationPolicy, + ) + + master_authorized_networks_config = proto.Field( + proto.MESSAGE, number=22, message=MasterAuthorizedNetworksConfig, + ) + + maintenance_policy = proto.Field( + proto.MESSAGE, number=23, message="MaintenancePolicy", + ) + + binary_authorization = proto.Field( + proto.MESSAGE, number=24, message=BinaryAuthorization, + ) + + autoscaling = proto.Field(proto.MESSAGE, number=26, message="ClusterAutoscaling",) + + network_config = proto.Field(proto.MESSAGE, number=27, message="NetworkConfig",) + + default_max_pods_constraint = proto.Field( + proto.MESSAGE, number=30, message="MaxPodsConstraint", + ) + + resource_usage_export_config = proto.Field( + proto.MESSAGE, number=33, message="ResourceUsageExportConfig", + ) + + authenticator_groups_config = proto.Field( + proto.MESSAGE, number=34, message=AuthenticatorGroupsConfig, + ) + + private_cluster_config = proto.Field( + proto.MESSAGE, number=37, message=PrivateClusterConfig, + ) + + database_encryption = proto.Field( + proto.MESSAGE, number=38, message="DatabaseEncryption", + ) + + vertical_pod_autoscaling = proto.Field( + proto.MESSAGE, number=39, message="VerticalPodAutoscaling", + ) + + self_link = proto.Field(proto.STRING, number=100) + + zone = proto.Field(proto.STRING, number=101) + + endpoint = proto.Field(proto.STRING, number=102) + + initial_cluster_version = proto.Field(proto.STRING, number=103) + + current_master_version = proto.Field(proto.STRING, number=104) + + current_node_version = proto.Field(proto.STRING, number=105) + + create_time = proto.Field(proto.STRING, number=106) + + status = proto.Field(proto.ENUM, number=107, enum=Status,) + + status_message = proto.Field(proto.STRING, number=108) + + node_ipv4_cidr_size = proto.Field(proto.INT32, number=109) + + services_ipv4_cidr = proto.Field(proto.STRING, number=110) + + instance_group_urls = 
proto.RepeatedField(proto.STRING, number=111) + + current_node_count = proto.Field(proto.INT32, number=112) + + expire_time = proto.Field(proto.STRING, number=113) + + location = proto.Field(proto.STRING, number=114) + + enable_tpu = proto.Field(proto.BOOL, number=115) + + tpu_ipv4_cidr_block = proto.Field(proto.STRING, number=116) + + conditions = proto.RepeatedField( + proto.MESSAGE, number=118, message="StatusCondition", + ) + + +class ClusterUpdate(proto.Message): + r"""ClusterUpdate describes an update to the cluster. Exactly one + update can be applied to a cluster with each request, so at most + one field can be provided. + + Attributes: + desired_node_version (str): + The Kubernetes version to change the nodes to + (typically an upgrade). + + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the Kubernetes + master version + desired_monitoring_service (str): + The monitoring service the cluster should use to write + metrics. Currently available options: + + - "monitoring.googleapis.com/kubernetes" - the Google Cloud + Monitoring service with Kubernetes-native resource model + - "monitoring.googleapis.com" - the Google Cloud Monitoring + service + - "none" - no metrics will be exported from the cluster + desired_addons_config (~.cluster_service.AddonsConfig): + Configurations for the various addons + available to run in the cluster. + desired_node_pool_id (str): + The node pool to be upgraded. This field is mandatory if + "desired_node_version", "desired_image_family" or + "desired_node_pool_autoscaling" is specified and there is + more than one node pool on the cluster. 
+ desired_image_type (str): + The desired image type for the node pool. NOTE: Set the + "desired_node_pool" field as well. + desired_database_encryption (~.cluster_service.DatabaseEncryption): + Configuration of etcd encryption. + desired_node_pool_autoscaling (~.cluster_service.NodePoolAutoscaling): + Autoscaler configuration for the node pool specified in + desired_node_pool_id. If there is only one pool in the + cluster and desired_node_pool_id is not provided then the + change applies to that single node pool. + desired_locations (Sequence[str]): + The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. Changing the + locations a cluster is in will result in nodes being either + created or removed from the cluster, depending on whether + locations are being added or removed. + + This list must always include the cluster's primary zone. + desired_master_authorized_networks_config (~.cluster_service.MasterAuthorizedNetworksConfig): + The desired configuration options for master + authorized networks feature. + desired_cluster_autoscaling (~.cluster_service.ClusterAutoscaling): + Cluster-level autoscaling configuration. + desired_binary_authorization (~.cluster_service.BinaryAuthorization): + The desired configuration options for the + Binary Authorization feature. + desired_logging_service (str): + The logging service the cluster should use to write logs. + Currently available options: + + - "logging.googleapis.com/kubernetes" - the Google Cloud + Logging service with Kubernetes-native resource model + - "logging.googleapis.com" - the Google Cloud Logging + service + - "none" - no logs will be exported from the cluster + desired_resource_usage_export_config (~.cluster_service.ResourceUsageExportConfig): + The desired configuration for exporting + resource usage. + desired_vertical_pod_autoscaling (~.cluster_service.VerticalPodAutoscaling): + Cluster-level Vertical Pod Autoscaling + configuration. 
+ desired_intra_node_visibility_config (~.cluster_service.IntraNodeVisibilityConfig): + The desired config of Intra-node visibility. + desired_master_version (str): + The Kubernetes version to change the master + to. + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the default + Kubernetes version + """ + + desired_node_version = proto.Field(proto.STRING, number=4) + + desired_monitoring_service = proto.Field(proto.STRING, number=5) + + desired_addons_config = proto.Field(proto.MESSAGE, number=6, message=AddonsConfig,) + + desired_node_pool_id = proto.Field(proto.STRING, number=7) + + desired_image_type = proto.Field(proto.STRING, number=8) + + desired_database_encryption = proto.Field( + proto.MESSAGE, number=46, message="DatabaseEncryption", + ) + + desired_node_pool_autoscaling = proto.Field( + proto.MESSAGE, number=9, message="NodePoolAutoscaling", + ) + + desired_locations = proto.RepeatedField(proto.STRING, number=10) + + desired_master_authorized_networks_config = proto.Field( + proto.MESSAGE, number=12, message=MasterAuthorizedNetworksConfig, + ) + + desired_cluster_autoscaling = proto.Field( + proto.MESSAGE, number=15, message="ClusterAutoscaling", + ) + + desired_binary_authorization = proto.Field( + proto.MESSAGE, number=16, message=BinaryAuthorization, + ) + + desired_logging_service = proto.Field(proto.STRING, number=19) + + desired_resource_usage_export_config = proto.Field( + proto.MESSAGE, number=21, message="ResourceUsageExportConfig", + ) + + desired_vertical_pod_autoscaling = proto.Field( + proto.MESSAGE, number=22, message="VerticalPodAutoscaling", + ) + + desired_intra_node_visibility_config = 
proto.Field( + proto.MESSAGE, number=26, message="IntraNodeVisibilityConfig", + ) + + desired_master_version = proto.Field(proto.STRING, number=100) + + +class Operation(proto.Message): + r"""This operation resource represents operations that may have + happened or are happening on the cluster. All fields are output + only. + + Attributes: + name (str): + The server-assigned ID for the operation. + zone (str): + The name of the Google Compute Engine + `zone `__ + in which the operation is taking place. This field is + deprecated, use location instead. + operation_type (~.cluster_service.Operation.Type): + The operation type. + status (~.cluster_service.Operation.Status): + The current status of the operation. + detail (str): + Detailed operation progress, if available. + status_message (str): + If an error has occurred, a textual + description of the error. + self_link (str): + Server-defined URL for the resource. + target_link (str): + Server-defined URL for the target of the + operation. + location (str): + [Output only] The name of the Google Compute Engine + `zone `__ + or + `region `__ + in which the cluster resides. + start_time (str): + [Output only] The time the operation started, in + `RFC3339 `__ text + format. + end_time (str): + [Output only] The time the operation completed, in + `RFC3339 `__ text + format. + cluster_conditions (Sequence[~.cluster_service.StatusCondition]): + Which conditions caused the current cluster + state. + nodepool_conditions (Sequence[~.cluster_service.StatusCondition]): + Which conditions caused the current node pool + state. 
+ """ + + class Status(proto.Enum): + r"""Current status of the operation.""" + STATUS_UNSPECIFIED = 0 + PENDING = 1 + RUNNING = 2 + DONE = 3 + ABORTING = 4 + + class Type(proto.Enum): + r"""Operation type.""" + TYPE_UNSPECIFIED = 0 + CREATE_CLUSTER = 1 + DELETE_CLUSTER = 2 + UPGRADE_MASTER = 3 + UPGRADE_NODES = 4 + REPAIR_CLUSTER = 5 + UPDATE_CLUSTER = 6 + CREATE_NODE_POOL = 7 + DELETE_NODE_POOL = 8 + SET_NODE_POOL_MANAGEMENT = 9 + AUTO_REPAIR_NODES = 10 + AUTO_UPGRADE_NODES = 11 + SET_LABELS = 12 + SET_MASTER_AUTH = 13 + SET_NODE_POOL_SIZE = 14 + SET_NETWORK_POLICY = 15 + SET_MAINTENANCE_POLICY = 16 + + name = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + operation_type = proto.Field(proto.ENUM, number=3, enum=Type,) + + status = proto.Field(proto.ENUM, number=4, enum=Status,) + + detail = proto.Field(proto.STRING, number=8) + + status_message = proto.Field(proto.STRING, number=5) + + self_link = proto.Field(proto.STRING, number=6) + + target_link = proto.Field(proto.STRING, number=7) + + location = proto.Field(proto.STRING, number=9) + + start_time = proto.Field(proto.STRING, number=10) + + end_time = proto.Field(proto.STRING, number=11) + + cluster_conditions = proto.RepeatedField( + proto.MESSAGE, number=13, message="StatusCondition", + ) + + nodepool_conditions = proto.RepeatedField( + proto.MESSAGE, number=14, message="StatusCondition", + ) + + +class CreateClusterRequest(proto.Message): + r"""CreateClusterRequest creates a cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the parent field. + cluster (~.cluster_service.Cluster): + Required. 
A `cluster + resource `__ + parent (str): + The parent (project and location) where the cluster will be + created. Specified in the format ``projects/*/locations/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster = proto.Field(proto.MESSAGE, number=3, message=Cluster,) + + parent = proto.Field(proto.STRING, number=5) + + +class GetClusterRequest(proto.Message): + r"""GetClusterRequest gets the settings of a cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + retrieve. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster) of the cluster to + retrieve. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + name = proto.Field(proto.STRING, number=5) + + +class UpdateClusterRequest(proto.Message): + r"""UpdateClusterRequest updates the settings of a cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. 
+ update (~.cluster_service.ClusterUpdate): + Required. A description of the update. + name (str): + The name (project, location, cluster) of the cluster to + update. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + update = proto.Field(proto.MESSAGE, number=4, message=ClusterUpdate,) + + name = proto.Field(proto.STRING, number=5) + + +class UpdateNodePoolRequest(proto.Message): + r"""UpdateNodePoolRequests update a node pool's image and/or + version. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Deprecated. The name of the node pool to + upgrade. This field has been deprecated and + replaced by the name field. + node_version (str): + Required. The Kubernetes version to change + the nodes to (typically an upgrade). + + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the Kubernetes + master version + image_type (str): + Required. The desired image type for the node + pool. + name (str): + The name (project, location, cluster, node pool) of the node + pool to update. 
Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + node_version = proto.Field(proto.STRING, number=5) + + image_type = proto.Field(proto.STRING, number=6) + + name = proto.Field(proto.STRING, number=8) + + +class SetNodePoolAutoscalingRequest(proto.Message): + r"""SetNodePoolAutoscalingRequest sets the autoscaler settings of + a node pool. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Deprecated. The name of the node pool to + upgrade. This field has been deprecated and + replaced by the name field. + autoscaling (~.cluster_service.NodePoolAutoscaling): + Required. Autoscaling configuration for the + node pool. + name (str): + The name (project, location, cluster, node pool) of the node + pool to set autoscaler settings. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + autoscaling = proto.Field(proto.MESSAGE, number=5, message="NodePoolAutoscaling",) + + name = proto.Field(proto.STRING, number=6) + + +class SetLoggingServiceRequest(proto.Message): + r"""SetLoggingServiceRequest sets the logging service of a + cluster. 
+ + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + logging_service (str): + Required. The logging service the cluster should use to + write metrics. Currently available options: + + - "logging.googleapis.com" - the Google Cloud Logging + service + - "none" - no metrics will be exported from the cluster + name (str): + The name (project, location, cluster) of the cluster to set + logging. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + logging_service = proto.Field(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=5) + + +class SetMonitoringServiceRequest(proto.Message): + r"""SetMonitoringServiceRequest sets the monitoring service of a + cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + monitoring_service (str): + Required. The monitoring service the cluster should use to + write metrics. 
Currently available options: + + - "monitoring.googleapis.com/kubernetes" - the Google Cloud + Monitoring service with Kubernetes-native resource model + - "monitoring.googleapis.com" - the Google Cloud Monitoring + service + - "none" - no metrics will be exported from the cluster + name (str): + The name (project, location, cluster) of the cluster to set + monitoring. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + monitoring_service = proto.Field(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=6) + + +class SetAddonsConfigRequest(proto.Message): + r"""SetAddonsConfigRequest sets the addons associated with the + cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + addons_config (~.cluster_service.AddonsConfig): + Required. The desired configurations for the + various addons available to run in the cluster. + name (str): + The name (project, location, cluster) of the cluster to set + addons. Specified in the format + ``projects/*/locations/*/clusters/*``. 
+ """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + addons_config = proto.Field(proto.MESSAGE, number=4, message=AddonsConfig,) + + name = proto.Field(proto.STRING, number=6) + + +class SetLocationsRequest(proto.Message): + r"""SetLocationsRequest sets the locations of the cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + locations (Sequence[str]): + Required. The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. Changing the + locations a cluster is in will result in nodes being either + created or removed from the cluster, depending on whether + locations are being added or removed. + + This list must always include the cluster's primary zone. + name (str): + The name (project, location, cluster) of the cluster to set + locations. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + locations = proto.RepeatedField(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=6) + + +class UpdateMasterRequest(proto.Message): + r"""UpdateMasterRequest updates the master of the cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. 
+ zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + master_version (str): + Required. The Kubernetes version to change + the master to. + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the default + Kubernetes version + name (str): + The name (project, location, cluster) of the cluster to + update. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + master_version = proto.Field(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=7) + + +class SetMasterAuthRequest(proto.Message): + r"""SetMasterAuthRequest updates the admin password of a cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + action (~.cluster_service.SetMasterAuthRequest.Action): + Required. The exact form of action to be + taken on the master auth. 
+ update (~.cluster_service.MasterAuth): + Required. A description of the update. + name (str): + The name (project, location, cluster) of the cluster to set + auth. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + class Action(proto.Enum): + r"""Operation type: what type update to perform.""" + UNKNOWN = 0 + SET_PASSWORD = 1 + GENERATE_PASSWORD = 2 + SET_USERNAME = 3 + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + action = proto.Field(proto.ENUM, number=4, enum=Action,) + + update = proto.Field(proto.MESSAGE, number=5, message=MasterAuth,) + + name = proto.Field(proto.STRING, number=7) + + +class DeleteClusterRequest(proto.Message): + r"""DeleteClusterRequest deletes a cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + delete. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster) of the cluster to + delete. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + name = proto.Field(proto.STRING, number=4) + + +class ListClustersRequest(proto.Message): + r"""ListClustersRequest lists clusters. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Deprecated. 
The name of the Google Compute Engine + `zone `__ + in which the cluster resides, or "-" for all zones. This + field has been deprecated and replaced by the parent field. + parent (str): + The parent (project and location) where the clusters will be + listed. Specified in the format ``projects/*/locations/*``. + Location "-" matches all zones and all regions. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + parent = proto.Field(proto.STRING, number=4) + + +class ListClustersResponse(proto.Message): + r"""ListClustersResponse is the result of ListClustersRequest. + + Attributes: + clusters (Sequence[~.cluster_service.Cluster]): + A list of clusters in the project in the + specified zone, or across all zones. + missing_zones (Sequence[str]): + If any zones are listed here, the list of + clusters returned may be missing those zones. + """ + + clusters = proto.RepeatedField(proto.MESSAGE, number=1, message=Cluster,) + + missing_zones = proto.RepeatedField(proto.STRING, number=2) + + +class GetOperationRequest(proto.Message): + r"""GetOperationRequest gets a single operation. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + operation_id (str): + Deprecated. The server-assigned ``name`` of the operation. + This field has been deprecated and replaced by the name + field. + name (str): + The name (project, location, operation id) of the operation + to get. Specified in the format + ``projects/*/locations/*/operations/*``. 
+ """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + operation_id = proto.Field(proto.STRING, number=3) + + name = proto.Field(proto.STRING, number=5) + + +class ListOperationsRequest(proto.Message): + r"""ListOperationsRequest lists operations. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for, or ``-`` for all zones. This field + has been deprecated and replaced by the parent field. + parent (str): + The parent (project and location) where the operations will + be listed. Specified in the format + ``projects/*/locations/*``. Location "-" matches all zones + and all regions. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + parent = proto.Field(proto.STRING, number=4) + + +class CancelOperationRequest(proto.Message): + r"""CancelOperationRequest cancels a single operation. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the operation resides. This field has been + deprecated and replaced by the name field. + operation_id (str): + Deprecated. The server-assigned ``name`` of the operation. + This field has been deprecated and replaced by the name + field. + name (str): + The name (project, location, operation id) of the operation + to cancel. Specified in the format + ``projects/*/locations/*/operations/*``. 
+ """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + operation_id = proto.Field(proto.STRING, number=3) + + name = proto.Field(proto.STRING, number=4) + + +class ListOperationsResponse(proto.Message): + r"""ListOperationsResponse is the result of + ListOperationsRequest. + + Attributes: + operations (Sequence[~.cluster_service.Operation]): + A list of operations in the project in the + specified zone. + missing_zones (Sequence[str]): + If any zones are listed here, the list of + operations returned may be missing the + operations from those zones. + """ + + operations = proto.RepeatedField(proto.MESSAGE, number=1, message=Operation,) + + missing_zones = proto.RepeatedField(proto.STRING, number=2) + + +class GetServerConfigRequest(proto.Message): + r"""Gets the current Kubernetes Engine service configuration. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for. This field has been deprecated and + replaced by the name field. + name (str): + The name (project and location) of the server config to get, + specified in the format ``projects/*/locations/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + name = proto.Field(proto.STRING, number=4) + + +class ServerConfig(proto.Message): + r"""Kubernetes Engine service configuration. + + Attributes: + default_cluster_version (str): + Version of Kubernetes the service deploys by + default. + valid_node_versions (Sequence[str]): + List of valid node upgrade target versions. + default_image_type (str): + Default image type. + valid_image_types (Sequence[str]): + List of valid image types. + valid_master_versions (Sequence[str]): + List of valid master versions. 
+ """ + + default_cluster_version = proto.Field(proto.STRING, number=1) + + valid_node_versions = proto.RepeatedField(proto.STRING, number=3) + + default_image_type = proto.Field(proto.STRING, number=4) + + valid_image_types = proto.RepeatedField(proto.STRING, number=5) + + valid_master_versions = proto.RepeatedField(proto.STRING, number=6) + + +class CreateNodePoolRequest(proto.Message): + r"""CreateNodePoolRequest creates a node pool for a cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the parent field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the parent field. + node_pool (~.cluster_service.NodePool): + Required. The node pool to create. + parent (str): + The parent (project, location, cluster id) where the node + pool will be created. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool = proto.Field(proto.MESSAGE, number=4, message="NodePool",) + + parent = proto.Field(proto.STRING, number=6) + + +class DeleteNodePoolRequest(proto.Message): + r"""DeleteNodePoolRequest deletes a node pool for a cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. 
The name of the cluster. + This field has been deprecated and replaced by + the name field. + node_pool_id (str): + Deprecated. The name of the node pool to + delete. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to delete. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=6) + + +class ListNodePoolsRequest(proto.Message): + r"""ListNodePoolsRequest lists the node pool(s) for a cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the parent field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the parent field. + parent (str): + The parent (project, location, cluster id) where the node + pools will be listed. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + parent = proto.Field(proto.STRING, number=5) + + +class GetNodePoolRequest(proto.Message): + r"""GetNodePoolRequest retrieves a node pool for a cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. 
The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the name field. + node_pool_id (str): + Deprecated. The name of the node pool. + This field has been deprecated and replaced by + the name field. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to get. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=6) + + +class NodePool(proto.Message): + r"""NodePool contains the name and configuration for a cluster's + node pool. Node pools are a set of nodes (i.e. VM's), with a + common configuration and specification, under the control of the + cluster master. They may have a set of Kubernetes labels applied + to them, which may be used to reference them during pod + scheduling. They may also be resized up or down, to accommodate + the workload. + + Attributes: + name (str): + The name of the node pool. + config (~.cluster_service.NodeConfig): + The node configuration of the pool. + initial_node_count (int): + The initial node count for the pool. You must ensure that + your Compute Engine `resource + quota `__ is + sufficient for this number of instances. You must also have + available firewall and routes quota. + self_link (str): + [Output only] Server-defined URL for the resource. + version (str): + The version of the Kubernetes of this node. + instance_group_urls (Sequence[str]): + [Output only] The resource URLs of the `managed instance + groups `__ + associated with this node pool. 
+ status (~.cluster_service.NodePool.Status): + [Output only] The status of the nodes in this pool instance. + status_message (str): + [Output only] Additional information about the current + status of this node pool instance, if available. + autoscaling (~.cluster_service.NodePoolAutoscaling): + Autoscaler configuration for this NodePool. + Autoscaler is enabled only if a valid + configuration is present. + management (~.cluster_service.NodeManagement): + NodeManagement configuration for this + NodePool. + max_pods_constraint (~.cluster_service.MaxPodsConstraint): + The constraint on the maximum number of pods + that can be run simultaneously on a node in the + node pool. + conditions (Sequence[~.cluster_service.StatusCondition]): + Which conditions caused the current node pool + state. + pod_ipv4_cidr_size (int): + [Output only] The pod CIDR block size per node in this node + pool. + """ + + class Status(proto.Enum): + r"""The current status of the node pool instance.""" + STATUS_UNSPECIFIED = 0 + PROVISIONING = 1 + RUNNING = 2 + RUNNING_WITH_ERROR = 3 + RECONCILING = 4 + STOPPING = 5 + ERROR = 6 + + name = proto.Field(proto.STRING, number=1) + + config = proto.Field(proto.MESSAGE, number=2, message=NodeConfig,) + + initial_node_count = proto.Field(proto.INT32, number=3) + + self_link = proto.Field(proto.STRING, number=100) + + version = proto.Field(proto.STRING, number=101) + + instance_group_urls = proto.RepeatedField(proto.STRING, number=102) + + status = proto.Field(proto.ENUM, number=103, enum=Status,) + + status_message = proto.Field(proto.STRING, number=104) + + autoscaling = proto.Field(proto.MESSAGE, number=4, message="NodePoolAutoscaling",) + + management = proto.Field(proto.MESSAGE, number=5, message="NodeManagement",) + + max_pods_constraint = proto.Field( + proto.MESSAGE, number=6, message="MaxPodsConstraint", + ) + + conditions = proto.RepeatedField( + proto.MESSAGE, number=105, message="StatusCondition", + ) + + pod_ipv4_cidr_size = 
proto.Field(proto.INT32, number=7) + + +class NodeManagement(proto.Message): + r"""NodeManagement defines the set of node management + services turned on for the node pool. + + Attributes: + auto_upgrade (bool): + A flag that specifies whether node auto- + upgrade is enabled for the node pool. If enabled, + node auto-upgrade helps keep the nodes in your + node pool up to date with the latest release + version of Kubernetes. + auto_repair (bool): + A flag that specifies whether the node auto- + repair is enabled for the node pool. If enabled, + the nodes in this node pool will be monitored + and, if they fail health checks too many times, + an automatic repair action will be triggered. + upgrade_options (~.cluster_service.AutoUpgradeOptions): + Specifies the Auto Upgrade knobs for the node + pool. + """ + + auto_upgrade = proto.Field(proto.BOOL, number=1) + + auto_repair = proto.Field(proto.BOOL, number=2) + + upgrade_options = proto.Field( + proto.MESSAGE, number=10, message="AutoUpgradeOptions", + ) + + +class AutoUpgradeOptions(proto.Message): + r"""AutoUpgradeOptions defines the set of options for the user to + control how the Auto Upgrades will proceed. + + Attributes: + auto_upgrade_start_time (str): + [Output only] This field is set when upgrades are about to + commence with the approximate start time for the upgrades, + in `RFC3339 `__ text + format. + description (str): + [Output only] This field is set when upgrades are about to + commence with the description of the upgrade. + """ + + auto_upgrade_start_time = proto.Field(proto.STRING, number=1) + + description = proto.Field(proto.STRING, number=2) + + +class MaintenancePolicy(proto.Message): + r"""MaintenancePolicy defines the maintenance policy to be used + for the cluster. + + Attributes: + window (~.cluster_service.MaintenanceWindow): + Specifies the maintenance window in which + maintenance may be performed. 
 + resource_version (str): + A hash identifying the version of this + policy, so that updates to fields of the policy + won't accidentally undo intermediate changes + (and so that users of the API unaware of some + fields won't accidentally remove other fields). + Make a get() request to the cluster + to get the current resource version and include + it with requests to set the policy. + """ + + window = proto.Field(proto.MESSAGE, number=1, message="MaintenanceWindow",) + + resource_version = proto.Field(proto.STRING, number=3) + + +class MaintenanceWindow(proto.Message): + r"""MaintenanceWindow defines the maintenance window to be used + for the cluster. + + Attributes: + daily_maintenance_window (~.cluster_service.DailyMaintenanceWindow): + DailyMaintenanceWindow specifies a daily + maintenance operation window. + recurring_window (~.cluster_service.RecurringTimeWindow): + RecurringWindow specifies some number of + recurring time periods for maintenance to occur. + The time windows may be overlapping. If no + maintenance windows are set, maintenance can + occur at any time. + maintenance_exclusions (Sequence[~.cluster_service.MaintenanceWindow.MaintenanceExclusionsEntry]): + Exceptions to maintenance window. Non- + emergency maintenance should not occur in these + windows. + """ + + daily_maintenance_window = proto.Field( + proto.MESSAGE, number=2, message="DailyMaintenanceWindow", + ) + + recurring_window = proto.Field( + proto.MESSAGE, number=3, message="RecurringTimeWindow", + ) + + maintenance_exclusions = proto.MapField( + proto.STRING, proto.MESSAGE, number=4, message="TimeWindow", + ) + + +class TimeWindow(proto.Message): + r"""Represents an arbitrary window of time. + + Attributes: + start_time (~.timestamp.Timestamp): + The time that the window first starts. + end_time (~.timestamp.Timestamp): + The time that the window ends. The end time + should take place after the start time. 
+ """ + + start_time = proto.Field(proto.MESSAGE, number=1, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + +class RecurringTimeWindow(proto.Message): + r"""Represents an arbitrary window of time that recurs. + + Attributes: + window (~.cluster_service.TimeWindow): + The window of the first recurrence. + recurrence (str): + An RRULE + (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) + for how this window reccurs. They go on for the + span of time between the start and end time. + + For example, to have something repeat every + weekday, you'd use: + FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR To + repeat some window daily (equivalent to the + DailyMaintenanceWindow): + FREQ=DAILY + For the first weekend of every month: + FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU + This specifies how frequently the window starts. + Eg, if you wanted to have a 9-5 UTC-4 window + every weekday, you'd use something like: + start time = 2019-01-01T09:00:00-0400 + end time = 2019-01-01T17:00:00-0400 + recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR + + Windows can span multiple days. Eg, to make the + window encompass every weekend from midnight + Saturday till the last minute of Sunday UTC: + + start time = 2019-01-05T00:00:00Z + end time = 2019-01-07T23:59:00Z + recurrence = FREQ=WEEKLY;BYDAY=SA + + Note the start and end time's specific dates are + largely arbitrary except to specify duration of + the window and when it first starts. The FREQ + values of HOURLY, MINUTELY, and SECONDLY are not + supported. + """ + + window = proto.Field(proto.MESSAGE, number=1, message=TimeWindow,) + + recurrence = proto.Field(proto.STRING, number=2) + + +class DailyMaintenanceWindow(proto.Message): + r"""Time window specified for daily maintenance operations. + + Attributes: + start_time (str): + Time within the maintenance window to start the maintenance + operations. 
Time format should be in + `RFC3339 `__ format + "HH:MM", where HH : [00-23] and MM : [00-59] GMT. + duration (str): + [Output only] Duration of the time window, automatically + chosen to be smallest possible in the given scenario. + Duration will be in + `RFC3339 `__ format + "PTnHnMnS". + """ + + start_time = proto.Field(proto.STRING, number=2) + + duration = proto.Field(proto.STRING, number=3) + + +class SetNodePoolManagementRequest(proto.Message): + r"""SetNodePoolManagementRequest sets the node management + properties of a node pool. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + update. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Deprecated. The name of the node pool to + update. This field has been deprecated and + replaced by the name field. + management (~.cluster_service.NodeManagement): + Required. NodeManagement configuration for + the node pool. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to set management properties. Specified in the + format ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + management = proto.Field(proto.MESSAGE, number=5, message=NodeManagement,) + + name = proto.Field(proto.STRING, number=7) + + +class SetNodePoolSizeRequest(proto.Message): + r"""SetNodePoolSizeRequest sets the size a node + pool. + + Attributes: + project_id (str): + Deprecated. 
The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + update. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Deprecated. The name of the node pool to + update. This field has been deprecated and + replaced by the name field. + node_count (int): + Required. The desired node count for the + pool. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to set size. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + node_count = proto.Field(proto.INT32, number=5) + + name = proto.Field(proto.STRING, number=7) + + +class RollbackNodePoolUpgradeRequest(proto.Message): + r"""RollbackNodePoolUpgradeRequest rollbacks the previously + Aborted or Failed NodePool upgrade. This will be an no-op if the + last upgrade successfully completed. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + rollback. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Deprecated. The name of the node pool to + rollback. 
This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to rollback upgrade. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=6) + + +class ListNodePoolsResponse(proto.Message): + r"""ListNodePoolsResponse is the result of ListNodePoolsRequest. + + Attributes: + node_pools (Sequence[~.cluster_service.NodePool]): + A list of node pools for a cluster. + """ + + node_pools = proto.RepeatedField(proto.MESSAGE, number=1, message=NodePool,) + + +class ClusterAutoscaling(proto.Message): + r"""ClusterAutoscaling contains global, per-cluster information + required by Cluster Autoscaler to automatically adjust the size + of the cluster and create/delete + node pools based on the current needs. + + Attributes: + enable_node_autoprovisioning (bool): + Enables automatic node pool creation and + deletion. + resource_limits (Sequence[~.cluster_service.ResourceLimit]): + Contains global constraints regarding minimum + and maximum amount of resources in the cluster. + autoprovisioning_node_pool_defaults (~.cluster_service.AutoprovisioningNodePoolDefaults): + AutoprovisioningNodePoolDefaults contains + defaults for a node pool created by NAP. + autoprovisioning_locations (Sequence[str]): + The list of Google Compute Engine + `zones `__ + in which the NodePool's nodes can be created by NAP. 
+ """ + + enable_node_autoprovisioning = proto.Field(proto.BOOL, number=1) + + resource_limits = proto.RepeatedField( + proto.MESSAGE, number=2, message="ResourceLimit", + ) + + autoprovisioning_node_pool_defaults = proto.Field( + proto.MESSAGE, number=4, message="AutoprovisioningNodePoolDefaults", + ) + + autoprovisioning_locations = proto.RepeatedField(proto.STRING, number=5) + + +class AutoprovisioningNodePoolDefaults(proto.Message): + r"""AutoprovisioningNodePoolDefaults contains defaults for a node + pool created by NAP. + + Attributes: + oauth_scopes (Sequence[str]): + Scopes that are used by NAP when creating node pools. If + oauth_scopes are specified, service_account should be empty. + service_account (str): + The Google Cloud Platform Service Account to be used by the + node VMs. If service_account is specified, scopes should be + empty. + """ + + oauth_scopes = proto.RepeatedField(proto.STRING, number=1) + + service_account = proto.Field(proto.STRING, number=2) + + +class ResourceLimit(proto.Message): + r"""Contains information about amount of some resource in the + cluster. For memory, value should be in GB. + + Attributes: + resource_type (str): + Resource name "cpu", "memory" or gpu-specific + string. + minimum (int): + Minimum amount of the resource in the + cluster. + maximum (int): + Maximum amount of the resource in the + cluster. + """ + + resource_type = proto.Field(proto.STRING, number=1) + + minimum = proto.Field(proto.INT64, number=2) + + maximum = proto.Field(proto.INT64, number=3) + + +class NodePoolAutoscaling(proto.Message): + r"""NodePoolAutoscaling contains information required by cluster + autoscaler to adjust the size of the node pool to the current + cluster usage. + + Attributes: + enabled (bool): + Is autoscaling enabled for this node pool. + min_node_count (int): + Minimum number of nodes in the NodePool. Must be >= 1 and <= + max_node_count. + max_node_count (int): + Maximum number of nodes in the NodePool. 
Must be >= + min_node_count. There has to enough quota to scale up the + cluster. + autoprovisioned (bool): + Can this node pool be deleted automatically. + """ + + enabled = proto.Field(proto.BOOL, number=1) + + min_node_count = proto.Field(proto.INT32, number=2) + + max_node_count = proto.Field(proto.INT32, number=3) + + autoprovisioned = proto.Field(proto.BOOL, number=4) + + +class SetLabelsRequest(proto.Message): + r"""SetLabelsRequest sets the Google Cloud Platform labels on a + Google Container Engine cluster, which will in turn set them for + Google Compute Engine resources used by that cluster + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the name field. + resource_labels (Sequence[~.cluster_service.SetLabelsRequest.ResourceLabelsEntry]): + Required. The labels to set for that cluster. + label_fingerprint (str): + Required. The fingerprint of the previous set + of labels for this resource, used to detect + conflicts. The fingerprint is initially + generated by Kubernetes Engine and changes after + every request to modify or update labels. You + must always provide an up-to-date fingerprint + hash when updating or changing labels. Make a + get() request to the resource to + get the latest fingerprint. + name (str): + The name (project, location, cluster id) of the cluster to + set labels. Specified in the format + ``projects/*/locations/*/clusters/*``. 
+ """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + resource_labels = proto.MapField(proto.STRING, proto.STRING, number=4) + + label_fingerprint = proto.Field(proto.STRING, number=5) + + name = proto.Field(proto.STRING, number=7) + + +class SetLegacyAbacRequest(proto.Message): + r"""SetLegacyAbacRequest enables or disables the ABAC + authorization mechanism for a cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + update. This field has been deprecated and + replaced by the name field. + enabled (bool): + Required. Whether ABAC authorization will be + enabled in the cluster. + name (str): + The name (project, location, cluster id) of the cluster to + set legacy abac. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + enabled = proto.Field(proto.BOOL, number=4) + + name = proto.Field(proto.STRING, number=6) + + +class StartIPRotationRequest(proto.Message): + r"""StartIPRotationRequest creates a new IP for the cluster and + then performs a node upgrade on each node pool to point to the + new IP. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. 
This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the name field. + name (str): + The name (project, location, cluster id) of the cluster to + start IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + rotate_credentials (bool): + Whether to rotate credentials during IP + rotation. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + name = proto.Field(proto.STRING, number=6) + + rotate_credentials = proto.Field(proto.BOOL, number=7) + + +class CompleteIPRotationRequest(proto.Message): + r"""CompleteIPRotationRequest moves the cluster master back into + single-IP mode. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the name field. + name (str): + The name (project, location, cluster id) of the cluster to + complete IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + name = proto.Field(proto.STRING, number=7) + + +class AcceleratorConfig(proto.Message): + r"""AcceleratorConfig represents a Hardware Accelerator request. + + Attributes: + accelerator_count (int): + The number of the accelerator cards exposed + to an instance. + accelerator_type (str): + The accelerator type resource name. 
List of supported + accelerators + `here `__ + """ + + accelerator_count = proto.Field(proto.INT64, number=1) + + accelerator_type = proto.Field(proto.STRING, number=2) + + +class SetNetworkPolicyRequest(proto.Message): + r"""SetNetworkPolicyRequest enables/disables network policy for a + cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the name field. + network_policy (~.cluster_service.NetworkPolicy): + Required. Configuration options for the + NetworkPolicy feature. + name (str): + The name (project, location, cluster id) of the cluster to + set networking policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + network_policy = proto.Field(proto.MESSAGE, number=4, message=NetworkPolicy,) + + name = proto.Field(proto.STRING, number=6) + + +class SetMaintenancePolicyRequest(proto.Message): + r"""SetMaintenancePolicyRequest sets the maintenance policy for a + cluster. + + Attributes: + project_id (str): + Required. The Google Developers Console `project ID or + project + number `__. + zone (str): + Required. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. + cluster_id (str): + Required. The name of the cluster to update. + maintenance_policy (~.cluster_service.MaintenancePolicy): + Required. The maintenance policy to be set + for the cluster. An empty field clears the + existing maintenance policy. 
+ name (str): + The name (project, location, cluster id) of the cluster to + set maintenance policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + maintenance_policy = proto.Field( + proto.MESSAGE, number=4, message=MaintenancePolicy, + ) + + name = proto.Field(proto.STRING, number=5) + + +class StatusCondition(proto.Message): + r"""StatusCondition describes why a cluster or a node pool has a + certain status (e.g., ERROR or DEGRADED). + + Attributes: + code (~.cluster_service.StatusCondition.Code): + Machine-friendly representation of the + condition + message (str): + Human-friendly representation of the + condition + """ + + class Code(proto.Enum): + r"""Code for each condition""" + UNKNOWN = 0 + GCE_STOCKOUT = 1 + GKE_SERVICE_ACCOUNT_DELETED = 2 + GCE_QUOTA_EXCEEDED = 3 + SET_BY_OPERATOR = 4 + CLOUD_KMS_KEY_ERROR = 7 + + code = proto.Field(proto.ENUM, number=1, enum=Code,) + + message = proto.Field(proto.STRING, number=2) + + +class NetworkConfig(proto.Message): + r"""NetworkConfig reports the relative names of network & + subnetwork. + + Attributes: + network (str): + Output only. The relative name of the Google Compute Engine + [network]`google.container.v1.NetworkConfig.network `__ + to which the cluster is connected. Example: + projects/my-project/global/networks/my-network + subnetwork (str): + Output only. The relative name of the Google Compute Engine + `subnetwork `__ + to which the cluster is connected. Example: + projects/my-project/regions/us-central1/subnetworks/my-subnet + enable_intra_node_visibility (bool): + Whether Intra-node visibility is enabled for + this cluster. This makes same node pod to pod + traffic visible for VPC network. 
+ """ + + network = proto.Field(proto.STRING, number=1) + + subnetwork = proto.Field(proto.STRING, number=2) + + enable_intra_node_visibility = proto.Field(proto.BOOL, number=5) + + +class IntraNodeVisibilityConfig(proto.Message): + r"""IntraNodeVisibilityConfig contains the desired config of the + intra-node visibility on this cluster. + + Attributes: + enabled (bool): + Enables intra node visibility for this + cluster. + """ + + enabled = proto.Field(proto.BOOL, number=1) + + +class MaxPodsConstraint(proto.Message): + r"""Constraints applied to pods. + + Attributes: + max_pods_per_node (int): + Constraint enforced on the max num of pods + per node. + """ + + max_pods_per_node = proto.Field(proto.INT64, number=1) + + +class DatabaseEncryption(proto.Message): + r"""Configuration of etcd encryption. + + Attributes: + state (~.cluster_service.DatabaseEncryption.State): + Denotes the state of etcd encryption. + key_name (str): + Name of CloudKMS key to use for the + encryption of secrets in etcd. Ex. projects/my- + project/locations/global/keyRings/my- + ring/cryptoKeys/my-key + """ + + class State(proto.Enum): + r"""State of etcd encryption.""" + UNKNOWN = 0 + ENCRYPTED = 1 + DECRYPTED = 2 + + state = proto.Field(proto.ENUM, number=2, enum=State,) + + key_name = proto.Field(proto.STRING, number=1) + + +class ListUsableSubnetworksRequest(proto.Message): + r"""ListUsableSubnetworksRequest requests the list of usable + subnetworks available to a user for creating clusters. + + Attributes: + parent (str): + The parent project where subnetworks are usable. Specified + in the format ``projects/*``. + filter (str): + Filtering currently only supports equality on the + networkProjectId and must be in the form: + "networkProjectId=[PROJECTID]", where ``networkProjectId`` + is the project which owns the listed subnetworks. This + defaults to the parent project ID. + page_size (int): + The max number of results per page that should be returned. 
+ If the number of available results is larger than + ``page_size``, a ``next_page_token`` is returned which can + be used to get the next page of results in subsequent + requests. Acceptable values are 0 to 500, inclusive. + (Default: 500) + page_token (str): + Specifies a page token to use. Set this to + the nextPageToken returned by previous list + requests to get the next page of results. + """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + +class ListUsableSubnetworksResponse(proto.Message): + r"""ListUsableSubnetworksResponse is the response of + ListUsableSubnetworksRequest. + + Attributes: + subnetworks (Sequence[~.cluster_service.UsableSubnetwork]): + A list of usable subnetworks in the specified + network project. + next_page_token (str): + This token allows you to get the next page of results for + list requests. If the number of results is larger than + ``page_size``, use the ``next_page_token`` as a value for + the query parameter ``page_token`` in the next request. The + value will become empty when there are no more pages. + """ + + @property + def raw_page(self): + return self + + subnetworks = proto.RepeatedField( + proto.MESSAGE, number=1, message="UsableSubnetwork", + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UsableSubnetworkSecondaryRange(proto.Message): + r"""Secondary IP range of a usable subnetwork. + + Attributes: + range_name (str): + The name associated with this subnetwork + secondary range, used when adding an alias IP + range to a VM instance. + ip_cidr_range (str): + The range of IP addresses belonging to this + subnetwork secondary range. + status (~.cluster_service.UsableSubnetworkSecondaryRange.Status): + This field is to determine the status of the + secondary range programmably. 
+ """ + + class Status(proto.Enum): + r"""Status shows the current usage of a secondary IP range.""" + UNKNOWN = 0 + UNUSED = 1 + IN_USE_SERVICE = 2 + IN_USE_SHAREABLE_POD = 3 + IN_USE_MANAGED_POD = 4 + + range_name = proto.Field(proto.STRING, number=1) + + ip_cidr_range = proto.Field(proto.STRING, number=2) + + status = proto.Field(proto.ENUM, number=3, enum=Status,) + + +class UsableSubnetwork(proto.Message): + r"""UsableSubnetwork resource returns the subnetwork name, its + associated network and the primary CIDR range. + + Attributes: + subnetwork (str): + Subnetwork Name. + Example: projects/my-project/regions/us- + central1/subnetworks/my-subnet + network (str): + Network Name. + Example: projects/my-project/global/networks/my- + network + ip_cidr_range (str): + The range of internal addresses that are + owned by this subnetwork. + secondary_ip_ranges (Sequence[~.cluster_service.UsableSubnetworkSecondaryRange]): + Secondary IP ranges. + status_message (str): + A human readable status message representing the reasons for + cases where the caller cannot use the secondary ranges under + the subnet. For example if the secondary_ip_ranges is empty + due to a permission issue, an insufficient permission + message will be given by status_message. + """ + + subnetwork = proto.Field(proto.STRING, number=1) + + network = proto.Field(proto.STRING, number=2) + + ip_cidr_range = proto.Field(proto.STRING, number=3) + + secondary_ip_ranges = proto.RepeatedField( + proto.MESSAGE, number=4, message=UsableSubnetworkSecondaryRange, + ) + + status_message = proto.Field(proto.STRING, number=5) + + +class ResourceUsageExportConfig(proto.Message): + r"""Configuration for exporting cluster resource usages. + + Attributes: + bigquery_destination (~.cluster_service.ResourceUsageExportConfig.BigQueryDestination): + Configuration to use BigQuery as usage export + destination. + enable_network_egress_metering (bool): + Whether to enable network egress metering for + this cluster. 
If enabled, a daemonset will be + created in the cluster to meter network egress + traffic. + consumption_metering_config (~.cluster_service.ResourceUsageExportConfig.ConsumptionMeteringConfig): + Configuration to enable resource consumption + metering. + """ + + class BigQueryDestination(proto.Message): + r"""Parameters for using BigQuery as the destination of resource + usage export. + + Attributes: + dataset_id (str): + The ID of a BigQuery Dataset. + """ + + dataset_id = proto.Field(proto.STRING, number=1) + + class ConsumptionMeteringConfig(proto.Message): + r"""Parameters for controlling consumption metering. + + Attributes: + enabled (bool): + Whether to enable consumption metering for + this cluster. If enabled, a second BigQuery + table will be created to hold resource + consumption records. + """ + + enabled = proto.Field(proto.BOOL, number=1) + + bigquery_destination = proto.Field( + proto.MESSAGE, number=1, message=BigQueryDestination, + ) + + enable_network_egress_metering = proto.Field(proto.BOOL, number=2) + + consumption_metering_config = proto.Field( + proto.MESSAGE, number=3, message=ConsumptionMeteringConfig, + ) + + +class VerticalPodAutoscaling(proto.Message): + r"""VerticalPodAutoscaling contains global, per-cluster + information required by Vertical Pod Autoscaler to automatically + adjust the resources of pods controlled by it. + + Attributes: + enabled (bool): + Enables vertical pod autoscaling. + """ + + enabled = proto.Field(proto.BOOL, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/container_v1beta1/__init__.py b/google/cloud/container_v1beta1/__init__.py index 22df86d9..b492cba4 100644 --- a/google/cloud/container_v1beta1/__init__.py +++ b/google/cloud/container_v1beta1/__init__.py @@ -1,45 +1,195 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.container_v1beta1 import types -from google.cloud.container_v1beta1.gapic import cluster_manager_client -from google.cloud.container_v1beta1.gapic import enums - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7. " - "More details about Python 2 support for Google Cloud Client Libraries " - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class ClusterManagerClient(cluster_manager_client.ClusterManagerClient): - __doc__ = cluster_manager_client.ClusterManagerClient.__doc__ - enums = enums +from .services.cluster_manager import ClusterManagerClient +from .types.cluster_service import AcceleratorConfig +from .types.cluster_service import AddonsConfig +from .types.cluster_service import AuthenticatorGroupsConfig +from .types.cluster_service import AutoUpgradeOptions +from .types.cluster_service import AutoprovisioningNodePoolDefaults +from .types.cluster_service import BinaryAuthorization +from .types.cluster_service import CancelOperationRequest +from .types.cluster_service import ClientCertificateConfig +from .types.cluster_service import CloudRunConfig +from .types.cluster_service import Cluster +from .types.cluster_service import ClusterAutoscaling +from .types.cluster_service import ClusterUpdate +from .types.cluster_service import CompleteIPRotationRequest +from 
.types.cluster_service import CreateClusterRequest +from .types.cluster_service import CreateNodePoolRequest +from .types.cluster_service import DailyMaintenanceWindow +from .types.cluster_service import DatabaseEncryption +from .types.cluster_service import DeleteClusterRequest +from .types.cluster_service import DeleteNodePoolRequest +from .types.cluster_service import GetClusterRequest +from .types.cluster_service import GetNodePoolRequest +from .types.cluster_service import GetOperationRequest +from .types.cluster_service import GetServerConfigRequest +from .types.cluster_service import HorizontalPodAutoscaling +from .types.cluster_service import HttpLoadBalancing +from .types.cluster_service import IPAllocationPolicy +from .types.cluster_service import IntraNodeVisibilityConfig +from .types.cluster_service import IstioConfig +from .types.cluster_service import KubernetesDashboard +from .types.cluster_service import LegacyAbac +from .types.cluster_service import ListClustersRequest +from .types.cluster_service import ListClustersResponse +from .types.cluster_service import ListLocationsRequest +from .types.cluster_service import ListLocationsResponse +from .types.cluster_service import ListNodePoolsRequest +from .types.cluster_service import ListNodePoolsResponse +from .types.cluster_service import ListOperationsRequest +from .types.cluster_service import ListOperationsResponse +from .types.cluster_service import ListUsableSubnetworksRequest +from .types.cluster_service import ListUsableSubnetworksResponse +from .types.cluster_service import Location +from .types.cluster_service import MaintenancePolicy +from .types.cluster_service import MaintenanceWindow +from .types.cluster_service import MasterAuth +from .types.cluster_service import MasterAuthorizedNetworksConfig +from .types.cluster_service import MaxPodsConstraint +from .types.cluster_service import NetworkConfig +from .types.cluster_service import NetworkPolicy +from .types.cluster_service import 
NetworkPolicyConfig +from .types.cluster_service import NodeConfig +from .types.cluster_service import NodeManagement +from .types.cluster_service import NodePool +from .types.cluster_service import NodePoolAutoscaling +from .types.cluster_service import NodeTaint +from .types.cluster_service import Operation +from .types.cluster_service import OperationProgress +from .types.cluster_service import PodSecurityPolicyConfig +from .types.cluster_service import PrivateClusterConfig +from .types.cluster_service import RecurringTimeWindow +from .types.cluster_service import ResourceLimit +from .types.cluster_service import ResourceUsageExportConfig +from .types.cluster_service import RollbackNodePoolUpgradeRequest +from .types.cluster_service import ServerConfig +from .types.cluster_service import SetAddonsConfigRequest +from .types.cluster_service import SetLabelsRequest +from .types.cluster_service import SetLegacyAbacRequest +from .types.cluster_service import SetLocationsRequest +from .types.cluster_service import SetLoggingServiceRequest +from .types.cluster_service import SetMaintenancePolicyRequest +from .types.cluster_service import SetMasterAuthRequest +from .types.cluster_service import SetMonitoringServiceRequest +from .types.cluster_service import SetNetworkPolicyRequest +from .types.cluster_service import SetNodePoolAutoscalingRequest +from .types.cluster_service import SetNodePoolManagementRequest +from .types.cluster_service import SetNodePoolSizeRequest +from .types.cluster_service import ShieldedInstanceConfig +from .types.cluster_service import StartIPRotationRequest +from .types.cluster_service import StatusCondition +from .types.cluster_service import TimeWindow +from .types.cluster_service import UpdateClusterRequest +from .types.cluster_service import UpdateMasterRequest +from .types.cluster_service import UpdateNodePoolRequest +from .types.cluster_service import UsableSubnetwork +from .types.cluster_service import UsableSubnetworkSecondaryRange 
+from .types.cluster_service import VerticalPodAutoscaling +from .types.cluster_service import WorkloadMetadataConfig __all__ = ( - "enums", - "types", + "AcceleratorConfig", + "AddonsConfig", + "AuthenticatorGroupsConfig", + "AutoUpgradeOptions", + "AutoprovisioningNodePoolDefaults", + "BinaryAuthorization", + "CancelOperationRequest", + "ClientCertificateConfig", + "CloudRunConfig", + "Cluster", + "ClusterAutoscaling", + "ClusterUpdate", + "CompleteIPRotationRequest", + "CreateClusterRequest", + "CreateNodePoolRequest", + "DailyMaintenanceWindow", + "DatabaseEncryption", + "DeleteClusterRequest", + "DeleteNodePoolRequest", + "GetClusterRequest", + "GetNodePoolRequest", + "GetOperationRequest", + "GetServerConfigRequest", + "HorizontalPodAutoscaling", + "HttpLoadBalancing", + "IPAllocationPolicy", + "IntraNodeVisibilityConfig", + "IstioConfig", + "KubernetesDashboard", + "LegacyAbac", + "ListClustersRequest", + "ListClustersResponse", + "ListLocationsRequest", + "ListLocationsResponse", + "ListNodePoolsRequest", + "ListNodePoolsResponse", + "ListOperationsRequest", + "ListOperationsResponse", + "ListUsableSubnetworksRequest", + "ListUsableSubnetworksResponse", + "Location", + "MaintenancePolicy", + "MaintenanceWindow", + "MasterAuth", + "MasterAuthorizedNetworksConfig", + "MaxPodsConstraint", + "NetworkConfig", + "NetworkPolicy", + "NetworkPolicyConfig", + "NodeConfig", + "NodeManagement", + "NodePool", + "NodePoolAutoscaling", + "NodeTaint", + "Operation", + "OperationProgress", + "PodSecurityPolicyConfig", + "PrivateClusterConfig", + "RecurringTimeWindow", + "ResourceLimit", + "ResourceUsageExportConfig", + "RollbackNodePoolUpgradeRequest", + "ServerConfig", + "SetAddonsConfigRequest", + "SetLabelsRequest", + "SetLegacyAbacRequest", + "SetLocationsRequest", + "SetLoggingServiceRequest", + "SetMaintenancePolicyRequest", + "SetMasterAuthRequest", + "SetMonitoringServiceRequest", + "SetNetworkPolicyRequest", + "SetNodePoolAutoscalingRequest", + 
"SetNodePoolManagementRequest", + "SetNodePoolSizeRequest", + "ShieldedInstanceConfig", + "StartIPRotationRequest", + "StatusCondition", + "TimeWindow", + "UpdateClusterRequest", + "UpdateMasterRequest", + "UpdateNodePoolRequest", + "UsableSubnetwork", + "UsableSubnetworkSecondaryRange", + "VerticalPodAutoscaling", + "WorkloadMetadataConfig", "ClusterManagerClient", ) diff --git a/google/cloud/container_v1beta1/gapic/__init__.py b/google/cloud/container_v1beta1/gapic/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/container_v1beta1/gapic/cluster_manager_client.py b/google/cloud/container_v1beta1/gapic/cluster_manager_client.py deleted file mode 100644 index e130eb99..00000000 --- a/google/cloud/container_v1beta1/gapic/cluster_manager_client.py +++ /dev/null @@ -1,3424 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.container.v1beta1 ClusterManager API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import grpc - -from google.cloud.container_v1beta1.gapic import cluster_manager_client_config -from google.cloud.container_v1beta1.gapic import enums -from google.cloud.container_v1beta1.gapic.transports import ( - cluster_manager_grpc_transport, -) -from google.cloud.container_v1beta1.proto import cluster_service_pb2 -from google.cloud.container_v1beta1.proto import cluster_service_pb2_grpc -from google.protobuf import empty_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-container", -).version - - -class ClusterManagerClient(object): - """Google Kubernetes Engine Cluster Manager v1beta1""" - - SERVICE_ADDRESS = "container.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.container.v1beta1.ClusterManager" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterManagerClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.ClusterManagerGrpcTransport, - Callable[[~.Credentials, type], ~.ClusterManagerGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. 
API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = cluster_manager_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=cluster_manager_grpc_transport.ClusterManagerGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = cluster_manager_grpc_transport.ClusterManagerGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) 
- self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def list_clusters( - self, - project_id, - zone, - parent=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all clusters owned by a project in either the specified zone or all - zones. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> response = client.list_clusters(project_id, zone) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the parent field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides, or "-" for all zones. This field has been - deprecated and replaced by the parent field. - parent (str): The parent (project and location) where the clusters will be listed. - Specified in the format ``projects/*/locations/*``. Location "-" matches - all zones and all regions. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
- metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.ListClustersResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_clusters" not in self._inner_api_calls: - self._inner_api_calls[ - "list_clusters" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_clusters, - default_retry=self._method_configs["ListClusters"].retry, - default_timeout=self._method_configs["ListClusters"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.ListClustersRequest( - project_id=project_id, zone=zone, parent=parent, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_clusters"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_cluster( - self, - project_id, - zone, - cluster_id, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the details for a specific cluster. 
- - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> response = client.get_cluster(project_id, zone, cluster_id) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to retrieve. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster) of the cluster to retrieve. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Cluster` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "get_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_cluster, - default_retry=self._method_configs["GetCluster"].retry, - default_timeout=self._method_configs["GetCluster"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.GetClusterRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id, name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_cluster( - self, - project_id, - zone, - cluster, - parent=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a cluster, consisting of the specified number and type of - Google Compute Engine instances. - - By default, the cluster is created in the project's `default - network `__. - - One firewall is added for the cluster. After cluster creation, the - Kubelet creates routes for each node to allow the containers on that - node to communicate with all other instances in the cluster. - - Finally, an entry is added to the project's global metadata indicating - which CIDR range the cluster is using. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster`: - >>> cluster = {} - >>> - >>> response = client.create_cluster(project_id, zone, cluster) - - Args: - project_id (str): Required. Deprecated. 
The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the parent field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the parent field. - cluster (Union[dict, ~google.cloud.container_v1beta1.types.Cluster]): Required. A `cluster - resource `__ - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1beta1.types.Cluster` - parent (str): The parent (project and location) where the cluster will be created. - Specified in the format ``projects/*/locations/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "create_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_cluster, - default_retry=self._method_configs["CreateCluster"].retry, - default_timeout=self._method_configs["CreateCluster"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.CreateClusterRequest( - project_id=project_id, zone=zone, cluster=cluster, parent=parent, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_cluster( - self, - project_id, - zone, - cluster_id, - update, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates the settings for a specific cluster. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `update`: - >>> update = {} - >>> - >>> response = client.update_cluster(project_id, zone, cluster_id, update) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. 
Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - update (Union[dict, ~google.cloud.container_v1beta1.types.ClusterUpdate]): Required. A description of the update. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1beta1.types.ClusterUpdate` - name (str): The name (project, location, cluster) of the cluster to update. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "update_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_cluster, - default_retry=self._method_configs["UpdateCluster"].retry, - default_timeout=self._method_configs["UpdateCluster"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.UpdateClusterRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - update=update, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_node_pool( - self, - project_id, - zone, - cluster_id, - node_pool_id, - node_version, - image_type, - workload_metadata_config=None, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates the version and/or image type of a specific node pool. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `node_pool_id`: - >>> node_pool_id = '' - >>> - >>> # TODO: Initialize `node_version`: - >>> node_version = '' - >>> - >>> # TODO: Initialize `image_type`: - >>> image_type = '' - >>> - >>> response = client.update_node_pool(project_id, zone, cluster_id, node_pool_id, node_version, image_type) - - Args: - project_id (str): Required. Deprecated. 
The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - node_pool_id (str): Required. Deprecated. The name of the node pool to upgrade. - This field has been deprecated and replaced by the name field. - node_version (str): Required. The Kubernetes version to change the nodes to (typically an - upgrade). - - Users may specify either explicit versions offered by Kubernetes Engine or - version aliases, which have the following behavior: - - - "latest": picks the highest valid Kubernetes version - - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - - "1.X.Y-gke.N": picks an explicit Kubernetes version - - "-": picks the Kubernetes master version - image_type (str): Required. The desired image type for the node pool. - workload_metadata_config (Union[dict, ~google.cloud.container_v1beta1.types.WorkloadMetadataConfig]): The desired image type for the node pool. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1beta1.types.WorkloadMetadataConfig` - name (str): The name (project, location, cluster, node pool) of the node pool to - update. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. 
Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_node_pool" not in self._inner_api_calls: - self._inner_api_calls[ - "update_node_pool" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_node_pool, - default_retry=self._method_configs["UpdateNodePool"].retry, - default_timeout=self._method_configs["UpdateNodePool"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.UpdateNodePoolRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - node_version=node_version, - image_type=image_type, - workload_metadata_config=workload_metadata_config, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_node_pool"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_node_pool_autoscaling( - self, - project_id, - zone, - cluster_id, - node_pool_id, - autoscaling, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the autoscaling settings of a specific node pool. 
- - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `node_pool_id`: - >>> node_pool_id = '' - >>> - >>> # TODO: Initialize `autoscaling`: - >>> autoscaling = {} - >>> - >>> response = client.set_node_pool_autoscaling(project_id, zone, cluster_id, node_pool_id, autoscaling) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - node_pool_id (str): Required. Deprecated. The name of the node pool to upgrade. - This field has been deprecated and replaced by the name field. - autoscaling (Union[dict, ~google.cloud.container_v1beta1.types.NodePoolAutoscaling]): Required. Autoscaling configuration for the node pool. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1beta1.types.NodePoolAutoscaling` - name (str): The name (project, location, cluster, node pool) of the node pool to - set autoscaler settings. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. 
Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "set_node_pool_autoscaling" not in self._inner_api_calls: - self._inner_api_calls[ - "set_node_pool_autoscaling" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_node_pool_autoscaling, - default_retry=self._method_configs["SetNodePoolAutoscaling"].retry, - default_timeout=self._method_configs["SetNodePoolAutoscaling"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetNodePoolAutoscalingRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - autoscaling=autoscaling, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_node_pool_autoscaling"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_logging_service( - self, - project_id, - zone, - cluster_id, - logging_service, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the logging service for a specific cluster. 
- - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `logging_service`: - >>> logging_service = '' - >>> - >>> response = client.set_logging_service(project_id, zone, cluster_id, logging_service) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - logging_service (str): Required. The logging service the cluster should use to write - metrics. Currently available options: - - - "logging.googleapis.com" - the Google Cloud Logging service - - "none" - no metrics will be exported from the cluster - name (str): The name (project, location, cluster) of the cluster to set logging. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. 
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "set_logging_service" not in self._inner_api_calls: - self._inner_api_calls[ - "set_logging_service" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_logging_service, - default_retry=self._method_configs["SetLoggingService"].retry, - default_timeout=self._method_configs["SetLoggingService"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetLoggingServiceRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - logging_service=logging_service, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_logging_service"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_monitoring_service( - self, - project_id, - zone, - cluster_id, - monitoring_service, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the monitoring service for a specific cluster. 
- - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `monitoring_service`: - >>> monitoring_service = '' - >>> - >>> response = client.set_monitoring_service(project_id, zone, cluster_id, monitoring_service) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - monitoring_service (str): Required. The monitoring service the cluster should use to write - metrics. Currently available options: - - - "monitoring.googleapis.com" - the Google Cloud Monitoring service - - "none" - no metrics will be exported from the cluster - name (str): The name (project, location, cluster) of the cluster to set - monitoring. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. 
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "set_monitoring_service" not in self._inner_api_calls: - self._inner_api_calls[ - "set_monitoring_service" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_monitoring_service, - default_retry=self._method_configs["SetMonitoringService"].retry, - default_timeout=self._method_configs["SetMonitoringService"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetMonitoringServiceRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - monitoring_service=monitoring_service, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_monitoring_service"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_addons_config( - self, - project_id, - zone, - cluster_id, - addons_config, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the addons for a specific cluster. 
- - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `addons_config`: - >>> addons_config = {} - >>> - >>> response = client.set_addons_config(project_id, zone, cluster_id, addons_config) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - addons_config (Union[dict, ~google.cloud.container_v1beta1.types.AddonsConfig]): Required. The desired configurations for the various addons available to run in the - cluster. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1beta1.types.AddonsConfig` - name (str): The name (project, location, cluster) of the cluster to set addons. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. 
- - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "set_addons_config" not in self._inner_api_calls: - self._inner_api_calls[ - "set_addons_config" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_addons_config, - default_retry=self._method_configs["SetAddonsConfig"].retry, - default_timeout=self._method_configs["SetAddonsConfig"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetAddonsConfigRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - addons_config=addons_config, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_addons_config"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_locations( - self, - project_id, - zone, - cluster_id, - locations, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the locations for a specific cluster. 
- - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `locations`: - >>> locations = [] - >>> - >>> response = client.set_locations(project_id, zone, cluster_id, locations) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - locations (list[str]): Required. The desired list of Google Compute Engine - `zones `__ in - which the cluster's nodes should be located. Changing the locations a - cluster is in will result in nodes being either created or removed from - the cluster, depending on whether locations are being added or removed. - - This list must always include the cluster's primary zone. - name (str): The name (project, location, cluster) of the cluster to set - locations. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. 
- - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "set_locations" not in self._inner_api_calls: - self._inner_api_calls[ - "set_locations" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_locations, - default_retry=self._method_configs["SetLocations"].retry, - default_timeout=self._method_configs["SetLocations"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetLocationsRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - locations=locations, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_locations"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_master( - self, - project_id, - zone, - cluster_id, - master_version, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates the master for a specific cluster. 
- - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `master_version`: - >>> master_version = '' - >>> - >>> response = client.update_master(project_id, zone, cluster_id, master_version) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - master_version (str): Required. The Kubernetes version to change the master to. - - Users may specify either explicit versions offered by - Kubernetes Engine or version aliases, which have the following behavior: - - - "latest": picks the highest valid Kubernetes version - - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - - "1.X.Y-gke.N": picks an explicit Kubernetes version - - "-": picks the default Kubernetes version - name (str): The name (project, location, cluster) of the cluster to update. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. 
Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_master" not in self._inner_api_calls: - self._inner_api_calls[ - "update_master" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_master, - default_retry=self._method_configs["UpdateMaster"].retry, - default_timeout=self._method_configs["UpdateMaster"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.UpdateMasterRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - master_version=master_version, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_master"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_master_auth( - self, - project_id, - zone, - cluster_id, - action, - update, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets master auth materials. Currently supports changing the admin password - or a specific cluster, either via password generation or explicitly setting - the password. 
- - Example: - >>> from google.cloud import container_v1beta1 - >>> from google.cloud.container_v1beta1 import enums - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `action`: - >>> action = enums.SetMasterAuthRequest.Action.UNKNOWN - >>> - >>> # TODO: Initialize `update`: - >>> update = {} - >>> - >>> response = client.set_master_auth(project_id, zone, cluster_id, action, update) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to upgrade. - This field has been deprecated and replaced by the name field. - action (~google.cloud.container_v1beta1.types.Action): Required. The exact form of action to be taken on the master auth. - update (Union[dict, ~google.cloud.container_v1beta1.types.MasterAuth]): Required. A description of the update. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1beta1.types.MasterAuth` - name (str): The name (project, location, cluster) of the cluster to set auth. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. 
Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "set_master_auth" not in self._inner_api_calls: - self._inner_api_calls[ - "set_master_auth" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_master_auth, - default_retry=self._method_configs["SetMasterAuth"].retry, - default_timeout=self._method_configs["SetMasterAuth"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetMasterAuthRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - action=action, - update=update, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_master_auth"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_cluster( - self, - project_id, - zone, - cluster_id, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes the cluster, including the Kubernetes endpoint and all worker - nodes. - - Firewalls and routes that were configured during cluster creation - are also deleted. 
- - Other Google Compute Engine resources that might be in use by the cluster, - such as load balancer resources, are not deleted if they weren't present - when the cluster was initially created. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> response = client.delete_cluster(project_id, zone, cluster_id) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to delete. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster) of the cluster to delete. - Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_cluster, - default_retry=self._method_configs["DeleteCluster"].retry, - default_timeout=self._method_configs["DeleteCluster"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.DeleteClusterRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id, name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["delete_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_operations( - self, - project_id, - zone, - parent=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all operations in a project in the specified zone or all zones. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> response = client.list_operations(project_id, zone) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the parent field. - zone (str): Required. Deprecated. 
The name of the Google Compute Engine - `zone `__ to - return operations for, or ``-`` for all zones. This field has been - deprecated and replaced by the parent field. - parent (str): The parent (project and location) where the operations will be - listed. Specified in the format ``projects/*/locations/*``. Location "-" - matches all zones and all regions. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.ListOperationsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_operations" not in self._inner_api_calls: - self._inner_api_calls[ - "list_operations" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_operations, - default_retry=self._method_configs["ListOperations"].retry, - default_timeout=self._method_configs["ListOperations"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.ListOperationsRequest( - project_id=project_id, zone=zone, parent=parent, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_operations"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_operation( - self, - project_id, - zone, - operation_id, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the specified operation. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `operation_id`: - >>> operation_id = '' - >>> - >>> response = client.get_operation(project_id, zone, operation_id) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - operation_id (str): Required. Deprecated. The server-assigned ``name`` of the operation. 
- This field has been deprecated and replaced by the name field. - name (str): The name (project, location, operation id) of the operation to get. - Specified in the format ``projects/*/locations/*/operations/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_operation" not in self._inner_api_calls: - self._inner_api_calls[ - "get_operation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_operation, - default_retry=self._method_configs["GetOperation"].retry, - default_timeout=self._method_configs["GetOperation"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.GetOperationRequest( - project_id=project_id, zone=zone, operation_id=operation_id, name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_operation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def cancel_operation( - self, - project_id, - zone, - operation_id, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Cancels the specified operation. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `operation_id`: - >>> operation_id = '' - >>> - >>> client.cancel_operation(project_id, zone, operation_id) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the operation resides. This field has been deprecated and replaced - by the name field. - operation_id (str): Required. Deprecated. The server-assigned ``name`` of the operation. 
- This field has been deprecated and replaced by the name field. - name (str): The name (project, location, operation id) of the operation to - cancel. Specified in the format ``projects/*/locations/*/operations/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "cancel_operation" not in self._inner_api_calls: - self._inner_api_calls[ - "cancel_operation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.cancel_operation, - default_retry=self._method_configs["CancelOperation"].retry, - default_timeout=self._method_configs["CancelOperation"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.CancelOperationRequest( - project_id=project_id, zone=zone, operation_id=operation_id, name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["cancel_operation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_server_config( - self, - project_id, - zone, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns configuration info about the Google Kubernetes Engine service. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> response = client.get_server_config(project_id, zone) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ to - return operations for. This field has been deprecated and replaced by - the name field. - name (str): The name (project and location) of the server config to get, - specified in the format ``projects/*/locations/*``. 
- retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.ServerConfig` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_server_config" not in self._inner_api_calls: - self._inner_api_calls[ - "get_server_config" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_server_config, - default_retry=self._method_configs["GetServerConfig"].retry, - default_timeout=self._method_configs["GetServerConfig"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.GetServerConfigRequest( - project_id=project_id, zone=zone, name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_server_config"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_node_pools( - self, - project_id, - zone, - cluster_id, - parent=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists the node pools for a 
cluster. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> response = client.list_node_pools(project_id, zone, cluster_id) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the parent field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the parent field. - cluster_id (str): Required. Deprecated. The name of the cluster. - This field has been deprecated and replaced by the parent field. - parent (str): The parent (project, location, cluster id) where the node pools will - be listed. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.ListNodePoolsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_node_pools" not in self._inner_api_calls: - self._inner_api_calls[ - "list_node_pools" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_node_pools, - default_retry=self._method_configs["ListNodePools"].retry, - default_timeout=self._method_configs["ListNodePools"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.ListNodePoolsRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id, parent=parent, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_node_pools"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_node_pool( - self, - project_id, - zone, - cluster_id, - node_pool_id, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Retrieves the requested node pool. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `node_pool_id`: - >>> node_pool_id = '' - >>> - >>> response = client.get_node_pool(project_id, zone, cluster_id, node_pool_id) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. 
- cluster_id (str): Required. Deprecated. The name of the cluster. - This field has been deprecated and replaced by the name field. - node_pool_id (str): Required. Deprecated. The name of the node pool. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster, node pool id) of the node pool - to get. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.NodePool` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_node_pool" not in self._inner_api_calls: - self._inner_api_calls[ - "get_node_pool" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_node_pool, - default_retry=self._method_configs["GetNodePool"].retry, - default_timeout=self._method_configs["GetNodePool"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.GetNodePoolRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_node_pool"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_node_pool( - self, - project_id, - zone, - cluster_id, - node_pool, - parent=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a node pool for a cluster. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `node_pool`: - >>> node_pool = {} - >>> - >>> response = client.create_node_pool(project_id, zone, cluster_id, node_pool) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the parent field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. 
This field has been deprecated and replaced - by the parent field. - cluster_id (str): Required. Deprecated. The name of the cluster. - This field has been deprecated and replaced by the parent field. - node_pool (Union[dict, ~google.cloud.container_v1beta1.types.NodePool]): Required. The node pool to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1beta1.types.NodePool` - parent (str): The parent (project, location, cluster id) where the node pool will - be created. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_node_pool" not in self._inner_api_calls: - self._inner_api_calls[ - "create_node_pool" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_node_pool, - default_retry=self._method_configs["CreateNodePool"].retry, - default_timeout=self._method_configs["CreateNodePool"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.CreateNodePoolRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool=node_pool, - parent=parent, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_node_pool"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_node_pool( - self, - project_id, - zone, - cluster_id, - node_pool_id, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a node pool from a cluster. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `node_pool_id`: - >>> node_pool_id = '' - >>> - >>> response = client.delete_node_pool(project_id, zone, cluster_id, node_pool_id) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. 
This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster. - This field has been deprecated and replaced by the name field. - node_pool_id (str): Required. Deprecated. The name of the node pool to delete. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster, node pool id) of the node pool - to delete. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_node_pool" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_node_pool" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_node_pool, - default_retry=self._method_configs["DeleteNodePool"].retry, - default_timeout=self._method_configs["DeleteNodePool"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.DeleteNodePoolRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["delete_node_pool"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def rollback_node_pool_upgrade( - self, - project_id, - zone, - cluster_id, - node_pool_id, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Rolls back a previously Aborted or Failed NodePool upgrade. - This makes no changes if the last upgrade successfully completed. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `node_pool_id`: - >>> node_pool_id = '' - >>> - >>> response = client.rollback_node_pool_upgrade(project_id, zone, cluster_id, node_pool_id) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. 
The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to rollback. - This field has been deprecated and replaced by the name field. - node_pool_id (str): Required. Deprecated. The name of the node pool to rollback. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster, node pool id) of the node poll - to rollback upgrade. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "rollback_node_pool_upgrade" not in self._inner_api_calls: - self._inner_api_calls[ - "rollback_node_pool_upgrade" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.rollback_node_pool_upgrade, - default_retry=self._method_configs["RollbackNodePoolUpgrade"].retry, - default_timeout=self._method_configs["RollbackNodePoolUpgrade"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.RollbackNodePoolUpgradeRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["rollback_node_pool_upgrade"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_node_pool_management( - self, - project_id, - zone, - cluster_id, - node_pool_id, - management, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the NodeManagement options for a node pool. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `node_pool_id`: - >>> node_pool_id = '' - >>> - >>> # TODO: Initialize `management`: - >>> management = {} - >>> - >>> response = client.set_node_pool_management(project_id, zone, cluster_id, node_pool_id, management) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. 
- This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to update. - This field has been deprecated and replaced by the name field. - node_pool_id (str): Required. Deprecated. The name of the node pool to update. - This field has been deprecated and replaced by the name field. - management (Union[dict, ~google.cloud.container_v1beta1.types.NodeManagement]): Required. NodeManagement configuration for the node pool. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1beta1.types.NodeManagement` - name (str): The name (project, location, cluster, node pool id) of the node pool - to set management properties. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_node_pool_management" not in self._inner_api_calls: - self._inner_api_calls[ - "set_node_pool_management" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_node_pool_management, - default_retry=self._method_configs["SetNodePoolManagement"].retry, - default_timeout=self._method_configs["SetNodePoolManagement"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetNodePoolManagementRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - management=management, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_node_pool_management"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_labels( - self, - project_id, - zone, - cluster_id, - resource_labels, - label_fingerprint, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets labels on a cluster. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `resource_labels`: - >>> resource_labels = {} - >>> - >>> # TODO: Initialize `label_fingerprint`: - >>> label_fingerprint = '' - >>> - >>> response = client.set_labels(project_id, zone, cluster_id, resource_labels, label_fingerprint) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project - number `__. 
- This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster. - This field has been deprecated and replaced by the name field. - resource_labels (dict[str -> str]): Required. The labels to set for that cluster. - label_fingerprint (str): Required. The fingerprint of the previous set of labels for this resource, - used to detect conflicts. The fingerprint is initially generated by - Kubernetes Engine and changes after every request to modify or update - labels. You must always provide an up-to-date fingerprint hash when - updating or changing labels. Make a get() request to the - resource to get the latest fingerprint. - name (str): The name (project, location, cluster id) of the cluster to set - labels. Specified in the format ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_labels" not in self._inner_api_calls: - self._inner_api_calls[ - "set_labels" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_labels, - default_retry=self._method_configs["SetLabels"].retry, - default_timeout=self._method_configs["SetLabels"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetLabelsRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - resource_labels=resource_labels, - label_fingerprint=label_fingerprint, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_labels"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_legacy_abac( - self, - project_id, - zone, - cluster_id, - enabled, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Enables or disables the ABAC authorization mechanism on a cluster. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `enabled`: - >>> enabled = False - >>> - >>> response = client.set_legacy_abac(project_id, zone, cluster_id, enabled) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. 
This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to update. - This field has been deprecated and replaced by the name field. - enabled (bool): Required. Whether ABAC authorization will be enabled in the cluster. - name (str): The name (project, location, cluster id) of the cluster to set - legacy abac. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_legacy_abac" not in self._inner_api_calls: - self._inner_api_calls[ - "set_legacy_abac" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_legacy_abac, - default_retry=self._method_configs["SetLegacyAbac"].retry, - default_timeout=self._method_configs["SetLegacyAbac"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetLegacyAbacRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - enabled=enabled, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_legacy_abac"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def start_i_p_rotation( - self, - project_id, - zone, - cluster_id, - name=None, - rotate_credentials=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Starts master IP rotation. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> response = client.start_i_p_rotation(project_id, zone, cluster_id) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster. 
- This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster id) of the cluster to start IP - rotation. Specified in the format ``projects/*/locations/*/clusters/*``. - rotate_credentials (bool): Whether to rotate credentials during IP rotation. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "start_i_p_rotation" not in self._inner_api_calls: - self._inner_api_calls[ - "start_i_p_rotation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.start_i_p_rotation, - default_retry=self._method_configs["StartIPRotation"].retry, - default_timeout=self._method_configs["StartIPRotation"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.StartIPRotationRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - name=name, - rotate_credentials=rotate_credentials, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["start_i_p_rotation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def complete_i_p_rotation( - self, - project_id, - zone, - cluster_id, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Completes master IP rotation. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> response = client.complete_i_p_rotation(project_id, zone, cluster_id) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. 
The name of the cluster. - This field has been deprecated and replaced by the name field. - name (str): The name (project, location, cluster id) of the cluster to complete - IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "complete_i_p_rotation" not in self._inner_api_calls: - self._inner_api_calls[ - "complete_i_p_rotation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.complete_i_p_rotation, - default_retry=self._method_configs["CompleteIPRotation"].retry, - default_timeout=self._method_configs["CompleteIPRotation"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.CompleteIPRotationRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id, name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["complete_i_p_rotation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_node_pool_size( - self, - project_id, - zone, - cluster_id, - node_pool_id, - node_count, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the size for a specific node pool. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `node_pool_id`: - >>> node_pool_id = '' - >>> - >>> # TODO: Initialize `node_count`: - >>> node_count = 0 - >>> - >>> response = client.set_node_pool_size(project_id, zone, cluster_id, node_pool_id, node_count) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. 
The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster to update. - This field has been deprecated and replaced by the name field. - node_pool_id (str): Required. Deprecated. The name of the node pool to update. - This field has been deprecated and replaced by the name field. - node_count (int): Required. The desired node count for the pool. - name (str): The name (project, location, cluster, node pool id) of the node pool - to set size. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_node_pool_size" not in self._inner_api_calls: - self._inner_api_calls[ - "set_node_pool_size" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_node_pool_size, - default_retry=self._method_configs["SetNodePoolSize"].retry, - default_timeout=self._method_configs["SetNodePoolSize"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetNodePoolSizeRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - node_count=node_count, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_node_pool_size"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_network_policy( - self, - project_id, - zone, - cluster_id, - network_policy, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Enables or disables Network Policy for a cluster. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `network_policy`: - >>> network_policy = {} - >>> - >>> response = client.set_network_policy(project_id, zone, cluster_id, network_policy) - - Args: - project_id (str): Required. Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name field. - zone (str): Required. Deprecated. 
The name of the Google Compute Engine - `zone `__ in - which the cluster resides. This field has been deprecated and replaced - by the name field. - cluster_id (str): Required. Deprecated. The name of the cluster. - This field has been deprecated and replaced by the name field. - network_policy (Union[dict, ~google.cloud.container_v1beta1.types.NetworkPolicy]): Required. Configuration options for the NetworkPolicy feature. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1beta1.types.NetworkPolicy` - name (str): The name (project, location, cluster id) of the cluster to set - networking policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_network_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_network_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_network_policy, - default_retry=self._method_configs["SetNetworkPolicy"].retry, - default_timeout=self._method_configs["SetNetworkPolicy"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetNetworkPolicyRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - network_policy=network_policy, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_network_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_maintenance_policy( - self, - project_id, - zone, - cluster_id, - maintenance_policy, - name=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the maintenance policy for a cluster. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `zone`: - >>> zone = '' - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `maintenance_policy`: - >>> maintenance_policy = {} - >>> - >>> response = client.set_maintenance_policy(project_id, zone, cluster_id, maintenance_policy) - - Args: - project_id (str): Required. The Google Developers Console `project ID or project - number `__. - zone (str): Required. The name of the Google Compute Engine - `zone `__ in - which the cluster resides. - cluster_id (str): Required. The name of the cluster to update. 
- maintenance_policy (Union[dict, ~google.cloud.container_v1beta1.types.MaintenancePolicy]): Required. The maintenance policy to be set for the cluster. An empty field - clears the existing maintenance policy. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.container_v1beta1.types.MaintenancePolicy` - name (str): The name (project, location, cluster id) of the cluster to set - maintenance policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_maintenance_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_maintenance_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_maintenance_policy, - default_retry=self._method_configs["SetMaintenancePolicy"].retry, - default_timeout=self._method_configs["SetMaintenancePolicy"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.SetMaintenancePolicyRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - maintenance_policy=maintenance_policy, - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_maintenance_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_usable_subnetworks( - self, - parent, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists subnetworks that can be used for creating clusters in a project. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `parent`: - >>> parent = '' - >>> - >>> # Iterate over all results - >>> for element in client.list_usable_subnetworks(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_usable_subnetworks(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The parent project where subnetworks are usable. Specified - in the format ``projects/*``. 
- filter_ (str): Filtering currently only supports equality on the networkProjectId - and must be in the form: "networkProjectId=[PROJECTID]", where - ``networkProjectId`` is the project which owns the listed subnetworks. - This defaults to the parent project ID. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.container_v1beta1.types.UsableSubnetwork` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_usable_subnetworks" not in self._inner_api_calls: - self._inner_api_calls[ - "list_usable_subnetworks" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_usable_subnetworks, - default_retry=self._method_configs["ListUsableSubnetworks"].retry, - default_timeout=self._method_configs["ListUsableSubnetworks"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.ListUsableSubnetworksRequest( - parent=parent, filter=filter_, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_usable_subnetworks"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="subnetworks", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def list_locations( - self, - parent, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Fetches locations that offer Google Kubernetes Engine. - - Example: - >>> from google.cloud import container_v1beta1 - >>> - >>> client = container_v1beta1.ClusterManagerClient() - >>> - >>> # TODO: Initialize `parent`: - >>> parent = '' - >>> - >>> response = client.list_locations(parent) - - Args: - parent (str): Required. Contains the name of the resource requested. Specified in - the format ``projects/*``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.container_v1beta1.types.ListLocationsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_locations" not in self._inner_api_calls: - self._inner_api_calls[ - "list_locations" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_locations, - default_retry=self._method_configs["ListLocations"].retry, - default_timeout=self._method_configs["ListLocations"].timeout, - client_info=self._client_info, - ) - - request = cluster_service_pb2.ListLocationsRequest(parent=parent,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_locations"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/container_v1beta1/gapic/cluster_manager_client_config.py b/google/cloud/container_v1beta1/gapic/cluster_manager_client_config.py deleted file mode 100644 index 15607e49..00000000 --- a/google/cloud/container_v1beta1/gapic/cluster_manager_client_config.py +++ /dev/null @@ -1,183 +0,0 @@ -config = { - "interfaces": { - "google.container.v1beta1.ClusterManager": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], 
- "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "ListClusters": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetCluster": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateNodePool": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetNodePoolAutoscaling": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetLoggingService": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetMonitoringService": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetAddonsConfig": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetLocations": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateMaster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetMasterAuth": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - 
"ListOperations": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetOperation": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CancelOperation": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetServerConfig": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListNodePools": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetNodePool": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateNodePool": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteNodePool": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "RollbackNodePoolUpgrade": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetNodePoolManagement": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetLabels": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetLegacyAbac": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "StartIPRotation": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "CompleteIPRotation": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetNodePoolSize": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetNetworkPolicy": { - "timeout_millis": 60000, - "retry_codes_name": 
"non_idempotent", - "retry_params_name": "default", - }, - "SetMaintenancePolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "ListUsableSubnetworks": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListLocations": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/google/cloud/container_v1beta1/gapic/enums.py b/google/cloud/container_v1beta1/gapic/enums.py deleted file mode 100644 index 45c2714c..00000000 --- a/google/cloud/container_v1beta1/gapic/enums.py +++ /dev/null @@ -1,316 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class Cluster(object): - class Status(enum.IntEnum): - """ - The current status of the cluster. - - Attributes: - STATUS_UNSPECIFIED (int): Not set. - PROVISIONING (int): The PROVISIONING state indicates the cluster is being created. - RUNNING (int): The RUNNING state indicates the cluster has been created and is fully - usable. - RECONCILING (int): The RECONCILING state indicates that some work is actively being - done on the cluster, such as upgrading the master or node software. - Details can be found in the ``statusMessage`` field. 
- STOPPING (int): The STOPPING state indicates the cluster is being deleted. - ERROR (int): The ERROR state indicates the cluster may be unusable. Details can - be found in the ``statusMessage`` field. - DEGRADED (int): The DEGRADED state indicates the cluster requires user action to - restore full functionality. Details can be found in the - ``statusMessage`` field. - """ - - STATUS_UNSPECIFIED = 0 - PROVISIONING = 1 - RUNNING = 2 - RECONCILING = 3 - STOPPING = 4 - ERROR = 5 - DEGRADED = 6 - - -class DatabaseEncryption(object): - class State(enum.IntEnum): - """ - State of etcd encryption. - - Attributes: - UNKNOWN (int): Should never be set - ENCRYPTED (int): Secrets in etcd are encrypted. - DECRYPTED (int): Secrets in etcd are stored in plain text (at etcd level) - this is - unrelated to Google Compute Engine level full disk encryption. - """ - - UNKNOWN = 0 - ENCRYPTED = 1 - DECRYPTED = 2 - - -class IstioConfig(object): - class IstioAuthMode(enum.IntEnum): - """ - Istio auth mode, https://istio.io/docs/concepts/security/mutual-tls.html - - Attributes: - AUTH_NONE (int): auth not enabled - AUTH_MUTUAL_TLS (int): auth mutual TLS enabled - """ - - AUTH_NONE = 0 - AUTH_MUTUAL_TLS = 1 - - -class Location(object): - class LocationType(enum.IntEnum): - """ - LocationType is the type of GKE location, regional or zonal. - - Attributes: - LOCATION_TYPE_UNSPECIFIED (int): LOCATION_TYPE_UNSPECIFIED means the location type was not - determined. - ZONE (int): A GKE Location where Zonal clusters can be created. - REGION (int): A GKE Location where Regional clusters can be created. - """ - - LOCATION_TYPE_UNSPECIFIED = 0 - ZONE = 1 - REGION = 2 - - -class NetworkPolicy(object): - class Provider(enum.IntEnum): - """ - Allowed Network Policy providers. - - Attributes: - PROVIDER_UNSPECIFIED (int): Not set - CALICO (int): Tigera (Calico Felix). 
- """ - - PROVIDER_UNSPECIFIED = 0 - CALICO = 1 - - -class NodePool(object): - class Status(enum.IntEnum): - """ - The current status of the node pool instance. - - Attributes: - STATUS_UNSPECIFIED (int): Not set. - PROVISIONING (int): The PROVISIONING state indicates the node pool is being created. - RUNNING (int): The RUNNING state indicates the node pool has been created - and is fully usable. - RUNNING_WITH_ERROR (int): The RUNNING_WITH_ERROR state indicates the node pool has been - created and is partially usable. Some error state has occurred and some - functionality may be impaired. Customer may need to reissue a request or - trigger a new update. - RECONCILING (int): The RECONCILING state indicates that some work is actively being - done on the node pool, such as upgrading node software. Details can be - found in the ``statusMessage`` field. - STOPPING (int): The STOPPING state indicates the node pool is being deleted. - ERROR (int): The ERROR state indicates the node pool may be unusable. Details can - be found in the ``statusMessage`` field. - """ - - STATUS_UNSPECIFIED = 0 - PROVISIONING = 1 - RUNNING = 2 - RUNNING_WITH_ERROR = 3 - RECONCILING = 4 - STOPPING = 5 - ERROR = 6 - - -class NodeTaint(object): - class Effect(enum.IntEnum): - """ - Possible values for Effect in taint. - - Attributes: - EFFECT_UNSPECIFIED (int): Not set - NO_SCHEDULE (int): NoSchedule - PREFER_NO_SCHEDULE (int): PreferNoSchedule - NO_EXECUTE (int): NoExecute - """ - - EFFECT_UNSPECIFIED = 0 - NO_SCHEDULE = 1 - PREFER_NO_SCHEDULE = 2 - NO_EXECUTE = 3 - - -class Operation(object): - class Status(enum.IntEnum): - """ - Current status of the operation. - - Attributes: - STATUS_UNSPECIFIED (int): Not set. - PENDING (int): The operation has been created. - RUNNING (int): The operation is currently running. - DONE (int): The operation is done, either cancelled or completed. - ABORTING (int): The operation is aborting. 
- """ - - STATUS_UNSPECIFIED = 0 - PENDING = 1 - RUNNING = 2 - DONE = 3 - ABORTING = 4 - - class Type(enum.IntEnum): - """ - Operation type. - - Attributes: - TYPE_UNSPECIFIED (int): Not set. - CREATE_CLUSTER (int): Cluster create. - DELETE_CLUSTER (int): Cluster delete. - UPGRADE_MASTER (int): A master upgrade. - UPGRADE_NODES (int): A node upgrade. - REPAIR_CLUSTER (int): Cluster repair. - UPDATE_CLUSTER (int): Cluster update. - CREATE_NODE_POOL (int): Node pool create. - DELETE_NODE_POOL (int): Node pool delete. - SET_NODE_POOL_MANAGEMENT (int): Set node pool management. - AUTO_REPAIR_NODES (int): Automatic node pool repair. - AUTO_UPGRADE_NODES (int): Automatic node upgrade. - SET_LABELS (int): Set labels. - SET_MASTER_AUTH (int): Set/generate master auth materials - SET_NODE_POOL_SIZE (int): Set node pool size. - SET_NETWORK_POLICY (int): Updates network policy for a cluster. - SET_MAINTENANCE_POLICY (int): Set the maintenance policy. - """ - - TYPE_UNSPECIFIED = 0 - CREATE_CLUSTER = 1 - DELETE_CLUSTER = 2 - UPGRADE_MASTER = 3 - UPGRADE_NODES = 4 - REPAIR_CLUSTER = 5 - UPDATE_CLUSTER = 6 - CREATE_NODE_POOL = 7 - DELETE_NODE_POOL = 8 - SET_NODE_POOL_MANAGEMENT = 9 - AUTO_REPAIR_NODES = 10 - AUTO_UPGRADE_NODES = 11 - SET_LABELS = 12 - SET_MASTER_AUTH = 13 - SET_NODE_POOL_SIZE = 14 - SET_NETWORK_POLICY = 15 - SET_MAINTENANCE_POLICY = 16 - - -class SetMasterAuthRequest(object): - class Action(enum.IntEnum): - """ - Operation type: what type update to perform. - - Attributes: - UNKNOWN (int): Operation is unknown and will error out. - SET_PASSWORD (int): Set the password to a user generated value. - GENERATE_PASSWORD (int): Generate a new password and set it to that. - SET_USERNAME (int): Set the username. If an empty username is provided, basic authentication - is disabled for the cluster. If a non-empty username is provided, basic - authentication is enabled, with either a provided password or a generated - one. 
- """ - - UNKNOWN = 0 - SET_PASSWORD = 1 - GENERATE_PASSWORD = 2 - SET_USERNAME = 3 - - -class StatusCondition(object): - class Code(enum.IntEnum): - """ - Code for each condition - - Attributes: - UNKNOWN (int): UNKNOWN indicates a generic condition. - GCE_STOCKOUT (int): GCE_STOCKOUT indicates a Google Compute Engine stockout. - GKE_SERVICE_ACCOUNT_DELETED (int): GKE_SERVICE_ACCOUNT_DELETED indicates that the user deleted their - robot service account. - GCE_QUOTA_EXCEEDED (int): Google Compute Engine quota was exceeded. - SET_BY_OPERATOR (int): Cluster state was manually changed by an SRE due to a system logic error. - CLOUD_KMS_KEY_ERROR (int): Unable to perform an encrypt operation against the CloudKMS key used for - etcd level encryption. - More codes TBA - """ - - UNKNOWN = 0 - GCE_STOCKOUT = 1 - GKE_SERVICE_ACCOUNT_DELETED = 2 - GCE_QUOTA_EXCEEDED = 3 - SET_BY_OPERATOR = 4 - CLOUD_KMS_KEY_ERROR = 7 - - -class UsableSubnetworkSecondaryRange(object): - class Status(enum.IntEnum): - """ - Status shows the current usage of a secondary IP range. - - Attributes: - UNKNOWN (int): UNKNOWN is the zero value of the Status enum. It's not a valid status. - UNUSED (int): UNUSED denotes that this range is unclaimed by any cluster. - IN_USE_SERVICE (int): IN_USE_SERVICE denotes that this range is claimed by a cluster for - services. It cannot be used for other clusters. - IN_USE_SHAREABLE_POD (int): IN_USE_SHAREABLE_POD denotes this range was created by the network - admin and is currently claimed by a cluster for pods. It can only be - used by other clusters as a pod range. - IN_USE_MANAGED_POD (int): IN_USE_MANAGED_POD denotes this range was created by GKE and is - claimed for pods. It cannot be used for other clusters. 
- """ - - UNKNOWN = 0 - UNUSED = 1 - IN_USE_SERVICE = 2 - IN_USE_SHAREABLE_POD = 3 - IN_USE_MANAGED_POD = 4 - - -class WorkloadMetadataConfig(object): - class NodeMetadata(enum.IntEnum): - """ - NodeMetadata is the configuration for if and how to expose the node - metadata to the workload running on the node. - - Attributes: - UNSPECIFIED (int): Not set. - SECURE (int): Prevent workloads not in hostNetwork from accessing certain VM metadata, - specifically kube-env, which contains Kubelet credentials, and the - instance identity token. - - Metadata concealment is a temporary security solution available while the - bootstrapping process for cluster nodes is being redesigned with - significant security improvements. This feature is scheduled to be - deprecated in the future and later removed. - EXPOSE (int): Expose all VM metadata to pods. - """ - - UNSPECIFIED = 0 - SECURE = 1 - EXPOSE = 2 diff --git a/google/cloud/container_v1beta1/gapic/transports/__init__.py b/google/cloud/container_v1beta1/gapic/transports/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/container_v1beta1/gapic/transports/cluster_manager_grpc_transport.py b/google/cloud/container_v1beta1/gapic/transports/cluster_manager_grpc_transport.py deleted file mode 100644 index bafe8449..00000000 --- a/google/cloud/container_v1beta1/gapic/transports/cluster_manager_grpc_transport.py +++ /dev/null @@ -1,549 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.container_v1beta1.proto import cluster_service_pb2_grpc - - -class ClusterManagerGrpcTransport(object): - """gRPC transport class providing stubs for - google.container.v1beta1 ClusterManager API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="container.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. 
- self._stubs = { - "cluster_manager_stub": cluster_service_pb2_grpc.ClusterManagerStub( - channel - ), - } - - @classmethod - def create_channel( - cls, address="container.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def list_clusters(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.list_clusters`. - - Lists all clusters owned by a project in either the specified zone or all - zones. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].ListClusters - - @property - def get_cluster(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.get_cluster`. - - Gets the details for a specific cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].GetCluster - - @property - def create_cluster(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.create_cluster`. - - Creates a cluster, consisting of the specified number and type of - Google Compute Engine instances. 
- - By default, the cluster is created in the project's `default - network `__. - - One firewall is added for the cluster. After cluster creation, the - Kubelet creates routes for each node to allow the containers on that - node to communicate with all other instances in the cluster. - - Finally, an entry is added to the project's global metadata indicating - which CIDR range the cluster is using. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].CreateCluster - - @property - def update_cluster(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.update_cluster`. - - Updates the settings for a specific cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].UpdateCluster - - @property - def update_node_pool(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.update_node_pool`. - - Updates the version and/or image type of a specific node pool. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].UpdateNodePool - - @property - def set_node_pool_autoscaling(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_node_pool_autoscaling`. - - Sets the autoscaling settings of a specific node pool. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetNodePoolAutoscaling - - @property - def set_logging_service(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_logging_service`. - - Sets the logging service for a specific cluster. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetLoggingService - - @property - def set_monitoring_service(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_monitoring_service`. - - Sets the monitoring service for a specific cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetMonitoringService - - @property - def set_addons_config(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_addons_config`. - - Sets the addons for a specific cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetAddonsConfig - - @property - def set_locations(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_locations`. - - Sets the locations for a specific cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetLocations - - @property - def update_master(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.update_master`. - - Updates the master for a specific cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].UpdateMaster - - @property - def set_master_auth(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_master_auth`. - - Sets master auth materials. 
Currently supports changing the admin password - or a specific cluster, either via password generation or explicitly setting - the password. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetMasterAuth - - @property - def delete_cluster(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.delete_cluster`. - - Deletes the cluster, including the Kubernetes endpoint and all worker - nodes. - - Firewalls and routes that were configured during cluster creation - are also deleted. - - Other Google Compute Engine resources that might be in use by the cluster, - such as load balancer resources, are not deleted if they weren't present - when the cluster was initially created. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].DeleteCluster - - @property - def list_operations(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.list_operations`. - - Lists all operations in a project in the specified zone or all zones. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].ListOperations - - @property - def get_operation(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.get_operation`. - - Gets the specified operation. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].GetOperation - - @property - def cancel_operation(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.cancel_operation`. - - Cancels the specified operation. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].CancelOperation - - @property - def get_server_config(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.get_server_config`. - - Returns configuration info about the Google Kubernetes Engine service. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].GetServerConfig - - @property - def list_node_pools(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.list_node_pools`. - - Lists the node pools for a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].ListNodePools - - @property - def get_node_pool(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.get_node_pool`. - - Retrieves the requested node pool. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].GetNodePool - - @property - def create_node_pool(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.create_node_pool`. - - Creates a node pool for a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].CreateNodePool - - @property - def delete_node_pool(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.delete_node_pool`. - - Deletes a node pool from a cluster. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].DeleteNodePool - - @property - def rollback_node_pool_upgrade(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.rollback_node_pool_upgrade`. - - Rolls back a previously Aborted or Failed NodePool upgrade. - This makes no changes if the last upgrade successfully completed. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].RollbackNodePoolUpgrade - - @property - def set_node_pool_management(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_node_pool_management`. - - Sets the NodeManagement options for a node pool. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetNodePoolManagement - - @property - def set_labels(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_labels`. - - Sets labels on a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetLabels - - @property - def set_legacy_abac(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_legacy_abac`. - - Enables or disables the ABAC authorization mechanism on a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetLegacyAbac - - @property - def start_i_p_rotation(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.start_i_p_rotation`. - - Starts master IP rotation. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].StartIPRotation - - @property - def complete_i_p_rotation(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.complete_i_p_rotation`. - - Completes master IP rotation. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].CompleteIPRotation - - @property - def set_node_pool_size(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_node_pool_size`. - - Sets the size for a specific node pool. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetNodePoolSize - - @property - def set_network_policy(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_network_policy`. - - Enables or disables Network Policy for a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetNetworkPolicy - - @property - def set_maintenance_policy(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.set_maintenance_policy`. - - Sets the maintenance policy for a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].SetMaintenancePolicy - - @property - def list_usable_subnetworks(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.list_usable_subnetworks`. - - Lists subnetworks that can be used for creating clusters in a project. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].ListUsableSubnetworks - - @property - def list_locations(self): - """Return the gRPC stub for :meth:`ClusterManagerClient.list_locations`. - - Fetches locations that offer Google Kubernetes Engine. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_manager_stub"].ListLocations diff --git a/google/cloud/container_v1beta1/proto/cluster_service_pb2.py b/google/cloud/container_v1beta1/proto/cluster_service_pb2.py deleted file mode 100644 index 870f8dbe..00000000 --- a/google/cloud/container_v1beta1/proto/cluster_service_pb2.py +++ /dev/null @@ -1,14436 +0,0 @@ -# -*- coding: utf-8 -*- -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/container_v1beta1/proto/cluster_service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/container_v1beta1/proto/cluster_service.proto", - package="google.container.v1beta1", - syntax="proto3", - 
serialized_options=b"\n\034com.google.container.v1beta1B\023ClusterServiceProtoP\001ZAgoogle.golang.org/genproto/googleapis/container/v1beta1;container\252\002\036Google.Cloud.Container.V1Beta1\312\002\036Google\\Cloud\\Container\\V1beta1\352\002!Google::Cloud::Container::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n:google/cloud/container_v1beta1/proto/cluster_service.proto\x12\x18google.container.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xec\x05\n\nNodeConfig\x12\x14\n\x0cmachine_type\x18\x01 \x01(\t\x12\x14\n\x0c\x64isk_size_gb\x18\x02 \x01(\x05\x12\x14\n\x0coauth_scopes\x18\x03 \x03(\t\x12\x17\n\x0fservice_account\x18\t \x01(\t\x12\x44\n\x08metadata\x18\x04 \x03(\x0b\x32\x32.google.container.v1beta1.NodeConfig.MetadataEntry\x12\x12\n\nimage_type\x18\x05 \x01(\t\x12@\n\x06labels\x18\x06 \x03(\x0b\x32\x30.google.container.v1beta1.NodeConfig.LabelsEntry\x12\x17\n\x0flocal_ssd_count\x18\x07 \x01(\x05\x12\x0c\n\x04tags\x18\x08 \x03(\t\x12\x13\n\x0bpreemptible\x18\n \x01(\x08\x12\x41\n\x0c\x61\x63\x63\x65lerators\x18\x0b \x03(\x0b\x32+.google.container.v1beta1.AcceleratorConfig\x12\x11\n\tdisk_type\x18\x0c \x01(\t\x12\x18\n\x10min_cpu_platform\x18\r \x01(\t\x12R\n\x18workload_metadata_config\x18\x0e \x01(\x0b\x32\x30.google.container.v1beta1.WorkloadMetadataConfig\x12\x33\n\x06taints\x18\x0f \x03(\x0b\x32#.google.container.v1beta1.NodeTaint\x12R\n\x18shielded_instance_config\x18\x14 \x01(\x0b\x32\x30.google.container.v1beta1.ShieldedInstanceConfig\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"Y\n\x16ShieldedInstanceConfig\x12\x1a\n\x12\x65nable_secure_boot\x18\x01 \x01(\x08\x12#\n\x1b\x65nable_integrity_monitoring\x18\x02 
\x01(\x08"\xbe\x01\n\tNodeTaint\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12:\n\x06\x65\x66\x66\x65\x63t\x18\x03 \x01(\x0e\x32*.google.container.v1beta1.NodeTaint.Effect"Y\n\x06\x45\x66\x66\x65\x63t\x12\x16\n\x12\x45\x46\x46\x45\x43T_UNSPECIFIED\x10\x00\x12\x0f\n\x0bNO_SCHEDULE\x10\x01\x12\x16\n\x12PREFER_NO_SCHEDULE\x10\x02\x12\x0e\n\nNO_EXECUTE\x10\x03"\xd6\x01\n\nMasterAuth\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\x12T\n\x19\x63lient_certificate_config\x18\x03 \x01(\x0b\x32\x31.google.container.v1beta1.ClientCertificateConfig\x12\x1e\n\x16\x63luster_ca_certificate\x18\x64 \x01(\t\x12\x1a\n\x12\x63lient_certificate\x18\x65 \x01(\t\x12\x12\n\nclient_key\x18\x66 \x01(\t";\n\x17\x43lientCertificateConfig\x12 \n\x18issue_client_certificate\x18\x01 \x01(\x08"\xd0\x03\n\x0c\x41\x64\x64onsConfig\x12H\n\x13http_load_balancing\x18\x01 \x01(\x0b\x32+.google.container.v1beta1.HttpLoadBalancing\x12V\n\x1ahorizontal_pod_autoscaling\x18\x02 \x01(\x0b\x32\x32.google.container.v1beta1.HorizontalPodAutoscaling\x12O\n\x14kubernetes_dashboard\x18\x03 \x01(\x0b\x32-.google.container.v1beta1.KubernetesDashboardB\x02\x18\x01\x12L\n\x15network_policy_config\x18\x04 \x01(\x0b\x32-.google.container.v1beta1.NetworkPolicyConfig\x12;\n\x0cistio_config\x18\x05 \x01(\x0b\x32%.google.container.v1beta1.IstioConfig\x12\x42\n\x10\x63loud_run_config\x18\x07 \x01(\x0b\x32(.google.container.v1beta1.CloudRunConfig"%\n\x11HttpLoadBalancing\x12\x10\n\x08\x64isabled\x18\x01 \x01(\x08",\n\x18HorizontalPodAutoscaling\x12\x10\n\x08\x64isabled\x18\x01 \x01(\x08"\'\n\x13KubernetesDashboard\x12\x10\n\x08\x64isabled\x18\x01 \x01(\x08"\'\n\x13NetworkPolicyConfig\x12\x10\n\x08\x64isabled\x18\x01 \x01(\x08"\xa8\x01\n\x14PrivateClusterConfig\x12\x1c\n\x14\x65nable_private_nodes\x18\x01 \x01(\x08\x12\x1f\n\x17\x65nable_private_endpoint\x18\x02 \x01(\x08\x12\x1e\n\x16master_ipv4_cidr_block\x18\x03 \x01(\t\x12\x18\n\x10private_endpoint\x18\x04 
\x01(\t\x12\x17\n\x0fpublic_endpoint\x18\x05 \x01(\t"\x97\x01\n\x0bIstioConfig\x12\x10\n\x08\x64isabled\x18\x01 \x01(\x08\x12\x41\n\x04\x61uth\x18\x02 \x01(\x0e\x32\x33.google.container.v1beta1.IstioConfig.IstioAuthMode"3\n\rIstioAuthMode\x12\r\n\tAUTH_NONE\x10\x00\x12\x13\n\x0f\x41UTH_MUTUAL_TLS\x10\x01""\n\x0e\x43loudRunConfig\x12\x10\n\x08\x64isabled\x18\x01 \x01(\x08"\xc1\x01\n\x1eMasterAuthorizedNetworksConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12W\n\x0b\x63idr_blocks\x18\x02 \x03(\x0b\x32\x42.google.container.v1beta1.MasterAuthorizedNetworksConfig.CidrBlock\x1a\x35\n\tCidrBlock\x12\x14\n\x0c\x64isplay_name\x18\x01 \x01(\t\x12\x12\n\ncidr_block\x18\x02 \x01(\t"\x1d\n\nLegacyAbac\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08"\x96\x01\n\rNetworkPolicy\x12\x42\n\x08provider\x18\x01 \x01(\x0e\x32\x30.google.container.v1beta1.NetworkPolicy.Provider\x12\x0f\n\x07\x65nabled\x18\x02 \x01(\x08"0\n\x08Provider\x12\x18\n\x14PROVIDER_UNSPECIFIED\x10\x00\x12\n\n\x06\x43\x41LICO\x10\x01"\xa3\x03\n\x12IPAllocationPolicy\x12\x16\n\x0euse_ip_aliases\x18\x01 \x01(\x08\x12\x19\n\x11\x63reate_subnetwork\x18\x02 \x01(\x08\x12\x17\n\x0fsubnetwork_name\x18\x03 \x01(\t\x12\x1d\n\x11\x63luster_ipv4_cidr\x18\x04 \x01(\tB\x02\x18\x01\x12\x1a\n\x0enode_ipv4_cidr\x18\x05 \x01(\tB\x02\x18\x01\x12\x1e\n\x12services_ipv4_cidr\x18\x06 \x01(\tB\x02\x18\x01\x12$\n\x1c\x63luster_secondary_range_name\x18\x07 \x01(\t\x12%\n\x1dservices_secondary_range_name\x18\x08 \x01(\t\x12\x1f\n\x17\x63luster_ipv4_cidr_block\x18\t \x01(\t\x12\x1c\n\x14node_ipv4_cidr_block\x18\n \x01(\t\x12 \n\x18services_ipv4_cidr_block\x18\x0b \x01(\t\x12\x1b\n\x13\x61llow_route_overlap\x18\x0c \x01(\x08\x12\x1b\n\x13tpu_ipv4_cidr_block\x18\r \x01(\t"&\n\x13\x42inaryAuthorization\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08"*\n\x17PodSecurityPolicyConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08"D\n\x19\x41uthenticatorGroupsConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12\x16\n\x0esecurity_group\x18\x02 
\x01(\t"\xec\x13\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x1e\n\x12initial_node_count\x18\x03 \x01(\x05\x42\x02\x18\x01\x12=\n\x0bnode_config\x18\x04 \x01(\x0b\x32$.google.container.v1beta1.NodeConfigB\x02\x18\x01\x12\x39\n\x0bmaster_auth\x18\x05 \x01(\x0b\x32$.google.container.v1beta1.MasterAuth\x12\x17\n\x0flogging_service\x18\x06 \x01(\t\x12\x1a\n\x12monitoring_service\x18\x07 \x01(\t\x12\x0f\n\x07network\x18\x08 \x01(\t\x12\x19\n\x11\x63luster_ipv4_cidr\x18\t \x01(\t\x12=\n\raddons_config\x18\n \x01(\x0b\x32&.google.container.v1beta1.AddonsConfig\x12\x12\n\nsubnetwork\x18\x0b \x01(\t\x12\x36\n\nnode_pools\x18\x0c \x03(\x0b\x32".google.container.v1beta1.NodePool\x12\x11\n\tlocations\x18\r \x03(\t\x12\x1f\n\x17\x65nable_kubernetes_alpha\x18\x0e \x01(\x08\x12N\n\x0fresource_labels\x18\x0f \x03(\x0b\x32\x35.google.container.v1beta1.Cluster.ResourceLabelsEntry\x12\x19\n\x11label_fingerprint\x18\x10 \x01(\t\x12\x39\n\x0blegacy_abac\x18\x12 \x01(\x0b\x32$.google.container.v1beta1.LegacyAbac\x12?\n\x0enetwork_policy\x18\x13 \x01(\x0b\x32\'.google.container.v1beta1.NetworkPolicy\x12J\n\x14ip_allocation_policy\x18\x14 \x01(\x0b\x32,.google.container.v1beta1.IPAllocationPolicy\x12\x63\n!master_authorized_networks_config\x18\x16 \x01(\x0b\x32\x38.google.container.v1beta1.MasterAuthorizedNetworksConfig\x12G\n\x12maintenance_policy\x18\x17 \x01(\x0b\x32+.google.container.v1beta1.MaintenancePolicy\x12K\n\x14\x62inary_authorization\x18\x18 \x01(\x0b\x32-.google.container.v1beta1.BinaryAuthorization\x12U\n\x1apod_security_policy_config\x18\x19 \x01(\x0b\x32\x31.google.container.v1beta1.PodSecurityPolicyConfig\x12\x41\n\x0b\x61utoscaling\x18\x1a \x01(\x0b\x32,.google.container.v1beta1.ClusterAutoscaling\x12?\n\x0enetwork_config\x18\x1b \x01(\x0b\x32\'.google.container.v1beta1.NetworkConfig\x12\x1b\n\x0fprivate_cluster\x18\x1c \x01(\x08\x42\x02\x18\x01\x12"\n\x16master_ipv4_cidr_block\x18\x1d 
\x01(\tB\x02\x18\x01\x12P\n\x1b\x64\x65\x66\x61ult_max_pods_constraint\x18\x1e \x01(\x0b\x32+.google.container.v1beta1.MaxPodsConstraint\x12Y\n\x1cresource_usage_export_config\x18! \x01(\x0b\x32\x33.google.container.v1beta1.ResourceUsageExportConfig\x12X\n\x1b\x61uthenticator_groups_config\x18" \x01(\x0b\x32\x33.google.container.v1beta1.AuthenticatorGroupsConfig\x12N\n\x16private_cluster_config\x18% \x01(\x0b\x32..google.container.v1beta1.PrivateClusterConfig\x12R\n\x18vertical_pod_autoscaling\x18\' \x01(\x0b\x32\x30.google.container.v1beta1.VerticalPodAutoscaling\x12\x11\n\tself_link\x18\x64 \x01(\t\x12\x10\n\x04zone\x18\x65 \x01(\tB\x02\x18\x01\x12\x10\n\x08\x65ndpoint\x18\x66 \x01(\t\x12\x1f\n\x17initial_cluster_version\x18g \x01(\t\x12\x1e\n\x16\x63urrent_master_version\x18h \x01(\t\x12 \n\x14\x63urrent_node_version\x18i \x01(\tB\x02\x18\x01\x12\x13\n\x0b\x63reate_time\x18j \x01(\t\x12\x38\n\x06status\x18k \x01(\x0e\x32(.google.container.v1beta1.Cluster.Status\x12\x1a\n\x0estatus_message\x18l \x01(\tB\x02\x18\x01\x12\x1b\n\x13node_ipv4_cidr_size\x18m \x01(\x05\x12\x1a\n\x12services_ipv4_cidr\x18n \x01(\t\x12\x1f\n\x13instance_group_urls\x18o \x03(\tB\x02\x18\x01\x12\x1e\n\x12\x63urrent_node_count\x18p \x01(\x05\x42\x02\x18\x01\x12\x13\n\x0b\x65xpire_time\x18q \x01(\t\x12\x10\n\x08location\x18r \x01(\t\x12\x12\n\nenable_tpu\x18s \x01(\x08\x12\x1b\n\x13tpu_ipv4_cidr_block\x18t \x01(\t\x12I\n\x13\x64\x61tabase_encryption\x18& \x01(\x0b\x32,.google.container.v1beta1.DatabaseEncryption\x12=\n\nconditions\x18v \x03(\x0b\x32).google.container.v1beta1.StatusCondition\x1a\x35\n\x13ResourceLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01"w\n\x06Status\x12\x16\n\x12STATUS_UNSPECIFIED\x10\x00\x12\x10\n\x0cPROVISIONING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x0f\n\x0bRECONCILING\x10\x03\x12\x0c\n\x08STOPPING\x10\x04\x12\t\n\x05\x45RROR\x10\x05\x12\x0c\n\x08\x44\x45GRADED\x10\x06"\x9a\x08\n\rClusterUpdate\x12\x1c\n\x14\x64\x65sired_node_version\x18\x04 \x01(\t\x12"\n\x1a\x64\x65sired_monitoring_service\x18\x05 \x01(\t\x12\x45\n\x15\x64\x65sired_addons_config\x18\x06 \x01(\x0b\x32&.google.container.v1beta1.AddonsConfig\x12\x1c\n\x14\x64\x65sired_node_pool_id\x18\x07 \x01(\t\x12\x1a\n\x12\x64\x65sired_image_type\x18\x08 \x01(\t\x12T\n\x1d\x64\x65sired_node_pool_autoscaling\x18\t \x01(\x0b\x32-.google.container.v1beta1.NodePoolAutoscaling\x12\x19\n\x11\x64\x65sired_locations\x18\n \x03(\t\x12k\n)desired_master_authorized_networks_config\x18\x0c \x01(\x0b\x32\x38.google.container.v1beta1.MasterAuthorizedNetworksConfig\x12]\n"desired_pod_security_policy_config\x18\x0e \x01(\x0b\x32\x31.google.container.v1beta1.PodSecurityPolicyConfig\x12Q\n\x1b\x64\x65sired_cluster_autoscaling\x18\x0f \x01(\x0b\x32,.google.container.v1beta1.ClusterAutoscaling\x12S\n\x1c\x64\x65sired_binary_authorization\x18\x10 \x01(\x0b\x32-.google.container.v1beta1.BinaryAuthorization\x12\x1f\n\x17\x64\x65sired_logging_service\x18\x13 \x01(\t\x12\x61\n$desired_resource_usage_export_config\x18\x15 \x01(\x0b\x32\x33.google.container.v1beta1.ResourceUsageExportConfig\x12Z\n desired_vertical_pod_autoscaling\x18\x16 \x01(\x0b\x32\x30.google.container.v1beta1.VerticalPodAutoscaling\x12\x61\n$desired_intra_node_visibility_config\x18\x1a \x01(\x0b\x32\x33.google.container.v1beta1.IntraNodeVisibilityConfig\x12\x1e\n\x16\x64\x65sired_master_version\x18\x64 \x01(\t"\xd7\x07\n\tOperation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x04zone\x18\x02 \x01(\tB\x02\x18\x01\x12@\n\x0eoperation_type\x18\x03 \x01(\x0e\x32(.google.container.v1beta1.Operation.Type\x12:\n\x06status\x18\x04 
\x01(\x0e\x32*.google.container.v1beta1.Operation.Status\x12\x0e\n\x06\x64\x65tail\x18\x08 \x01(\t\x12\x1a\n\x0estatus_message\x18\x05 \x01(\tB\x02\x18\x01\x12\x11\n\tself_link\x18\x06 \x01(\t\x12\x13\n\x0btarget_link\x18\x07 \x01(\t\x12\x10\n\x08location\x18\t \x01(\t\x12\x12\n\nstart_time\x18\n \x01(\t\x12\x10\n\x08\x65nd_time\x18\x0b \x01(\t\x12=\n\x08progress\x18\x0c \x01(\x0b\x32+.google.container.v1beta1.OperationProgress\x12\x45\n\x12\x63luster_conditions\x18\r \x03(\x0b\x32).google.container.v1beta1.StatusCondition\x12\x46\n\x13nodepool_conditions\x18\x0e \x03(\x0b\x32).google.container.v1beta1.StatusCondition"R\n\x06Status\x12\x16\n\x12STATUS_UNSPECIFIED\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x08\n\x04\x44ONE\x10\x03\x12\x0c\n\x08\x41\x42ORTING\x10\x04"\xfd\x02\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x12\n\x0e\x43REATE_CLUSTER\x10\x01\x12\x12\n\x0e\x44\x45LETE_CLUSTER\x10\x02\x12\x12\n\x0eUPGRADE_MASTER\x10\x03\x12\x11\n\rUPGRADE_NODES\x10\x04\x12\x12\n\x0eREPAIR_CLUSTER\x10\x05\x12\x12\n\x0eUPDATE_CLUSTER\x10\x06\x12\x14\n\x10\x43REATE_NODE_POOL\x10\x07\x12\x14\n\x10\x44\x45LETE_NODE_POOL\x10\x08\x12\x1c\n\x18SET_NODE_POOL_MANAGEMENT\x10\t\x12\x15\n\x11\x41UTO_REPAIR_NODES\x10\n\x12\x16\n\x12\x41UTO_UPGRADE_NODES\x10\x0b\x12\x0e\n\nSET_LABELS\x10\x0c\x12\x13\n\x0fSET_MASTER_AUTH\x10\r\x12\x16\n\x12SET_NODE_POOL_SIZE\x10\x0e\x12\x16\n\x12SET_NETWORK_POLICY\x10\x0f\x12\x1a\n\x16SET_MAINTENANCE_POLICY\x10\x10"\xc5\x02\n\x11OperationProgress\x12\x0c\n\x04name\x18\x01 \x01(\t\x12:\n\x06status\x18\x02 \x01(\x0e\x32*.google.container.v1beta1.Operation.Status\x12\x43\n\x07metrics\x18\x03 \x03(\x0b\x32\x32.google.container.v1beta1.OperationProgress.Metric\x12;\n\x06stages\x18\x04 \x03(\x0b\x32+.google.container.v1beta1.OperationProgress\x1a\x64\n\x06Metric\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\tint_value\x18\x02 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x03 \x01(\x01H\x00\x12\x16\n\x0cstring_value\x18\x04 
\x01(\tH\x00\x42\x07\n\x05value"\x8f\x01\n\x14\x43reateClusterRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.container.v1beta1.ClusterB\x03\xe0\x41\x02\x12\x0e\n\x06parent\x18\x05 \x01(\t"l\n\x11GetClusterRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x0c\n\x04name\x18\x05 \x01(\t"\xad\x01\n\x14UpdateClusterRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12<\n\x06update\x18\x04 \x01(\x0b\x32\'.google.container.v1beta1.ClusterUpdateB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x05 \x01(\t"\x95\x02\n\x15UpdateNodePoolRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x1b\n\x0cnode_pool_id\x18\x04 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\x0cnode_version\x18\x05 \x01(\tB\x03\xe0\x41\x02\x12\x17\n\nimage_type\x18\x06 \x01(\tB\x03\xe0\x41\x02\x12R\n\x18workload_metadata_config\x18\x0e \x01(\x0b\x32\x30.google.container.v1beta1.WorkloadMetadataConfig\x12\x0c\n\x04name\x18\x08 \x01(\t"\xde\x01\n\x1dSetNodePoolAutoscalingRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x1b\n\x0cnode_pool_id\x18\x04 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12G\n\x0b\x61utoscaling\x18\x05 \x01(\x0b\x32-.google.container.v1beta1.NodePoolAutoscalingB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x06 \x01(\t"\x91\x01\n\x18SetLoggingServiceRequest\x12\x19\n\nproject_id\x18\x01 
\x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x1c\n\x0flogging_service\x18\x04 \x01(\tB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x05 \x01(\t"\x97\x01\n\x1bSetMonitoringServiceRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x1f\n\x12monitoring_service\x18\x04 \x01(\tB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x06 \x01(\t"\xb5\x01\n\x16SetAddonsConfigRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x42\n\raddons_config\x18\x04 \x01(\x0b\x32&.google.container.v1beta1.AddonsConfigB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x06 \x01(\t"\x86\x01\n\x13SetLocationsRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x16\n\tlocations\x18\x04 \x03(\tB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x06 \x01(\t"\x8b\x01\n\x13UpdateMasterRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x1b\n\x0emaster_version\x18\x04 \x01(\tB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x07 \x01(\t"\xc8\x02\n\x14SetMasterAuthRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12J\n\x06\x61\x63tion\x18\x04 \x01(\x0e\x32\x35.google.container.v1beta1.SetMasterAuthRequest.ActionB\x03\xe0\x41\x02\x12\x39\n\x06update\x18\x05 
\x01(\x0b\x32$.google.container.v1beta1.MasterAuthB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x07 \x01(\t"P\n\x06\x41\x63tion\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x10\n\x0cSET_PASSWORD\x10\x01\x12\x15\n\x11GENERATE_PASSWORD\x10\x02\x12\x10\n\x0cSET_USERNAME\x10\x03"o\n\x14\x44\x65leteClusterRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x0c\n\x04name\x18\x04 \x01(\t"U\n\x13ListClustersRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x0e\n\x06parent\x18\x04 \x01(\t"b\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.container.v1beta1.Cluster\x12\x15\n\rmissing_zones\x18\x02 \x03(\t"p\n\x13GetOperationRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x1b\n\x0coperation_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x0c\n\x04name\x18\x05 \x01(\t"W\n\x15ListOperationsRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x0e\n\x06parent\x18\x04 \x01(\t"s\n\x16\x43\x61ncelOperationRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x1b\n\x0coperation_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x0c\n\x04name\x18\x04 \x01(\t"h\n\x16ListOperationsResponse\x12\x37\n\noperations\x18\x01 \x03(\x0b\x32#.google.container.v1beta1.Operation\x12\x15\n\rmissing_zones\x18\x02 \x03(\t"V\n\x16GetServerConfigRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x0c\n\x04name\x18\x04 \x01(\t"\xa2\x01\n\x0cServerConfig\x12\x1f\n\x17\x64\x65\x66\x61ult_cluster_version\x18\x01 
\x01(\t\x12\x1b\n\x13valid_node_versions\x18\x03 \x03(\t\x12\x1a\n\x12\x64\x65\x66\x61ult_image_type\x18\x04 \x01(\t\x12\x19\n\x11valid_image_types\x18\x05 \x03(\t\x12\x1d\n\x15valid_master_versions\x18\x06 \x03(\t"\xae\x01\n\x15\x43reateNodePoolRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12:\n\tnode_pool\x18\x04 \x01(\x0b\x32".google.container.v1beta1.NodePoolB\x03\xe0\x41\x02\x12\x0e\n\x06parent\x18\x06 \x01(\t"\x8d\x01\n\x15\x44\x65leteNodePoolRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x1b\n\x0cnode_pool_id\x18\x04 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x0c\n\x04name\x18\x06 \x01(\t"q\n\x14ListNodePoolsRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x0e\n\x06parent\x18\x05 \x01(\t"\x8a\x01\n\x12GetNodePoolRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x1b\n\x0cnode_pool_id\x18\x04 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x0c\n\x04name\x18\x06 \x01(\t"\xad\x05\n\x08NodePool\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x06\x63onfig\x18\x02 \x01(\x0b\x32$.google.container.v1beta1.NodeConfig\x12\x1a\n\x12initial_node_count\x18\x03 \x01(\x05\x12\x11\n\tself_link\x18\x64 \x01(\t\x12\x0f\n\x07version\x18\x65 \x01(\t\x12\x1b\n\x13instance_group_urls\x18\x66 \x03(\t\x12\x39\n\x06status\x18g \x01(\x0e\x32).google.container.v1beta1.NodePool.Status\x12\x1a\n\x0estatus_message\x18h \x01(\tB\x02\x18\x01\x12\x42\n\x0b\x61utoscaling\x18\x04 
\x01(\x0b\x32-.google.container.v1beta1.NodePoolAutoscaling\x12<\n\nmanagement\x18\x05 \x01(\x0b\x32(.google.container.v1beta1.NodeManagement\x12H\n\x13max_pods_constraint\x18\x06 \x01(\x0b\x32+.google.container.v1beta1.MaxPodsConstraint\x12=\n\nconditions\x18i \x03(\x0b\x32).google.container.v1beta1.StatusCondition\x12\x1a\n\x12pod_ipv4_cidr_size\x18\x07 \x01(\x05"\x81\x01\n\x06Status\x12\x16\n\x12STATUS_UNSPECIFIED\x10\x00\x12\x10\n\x0cPROVISIONING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x16\n\x12RUNNING_WITH_ERROR\x10\x03\x12\x0f\n\x0bRECONCILING\x10\x04\x12\x0c\n\x08STOPPING\x10\x05\x12\t\n\x05\x45RROR\x10\x06"\x82\x01\n\x0eNodeManagement\x12\x14\n\x0c\x61uto_upgrade\x18\x01 \x01(\x08\x12\x13\n\x0b\x61uto_repair\x18\x02 \x01(\x08\x12\x45\n\x0fupgrade_options\x18\n \x01(\x0b\x32,.google.container.v1beta1.AutoUpgradeOptions"J\n\x12\x41utoUpgradeOptions\x12\x1f\n\x17\x61uto_upgrade_start_time\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t"j\n\x11MaintenancePolicy\x12;\n\x06window\x18\x01 \x01(\x0b\x32+.google.container.v1beta1.MaintenanceWindow\x12\x18\n\x10resource_version\x18\x03 \x01(\t"\x8a\x03\n\x11MaintenanceWindow\x12T\n\x18\x64\x61ily_maintenance_window\x18\x02 \x01(\x0b\x32\x30.google.container.v1beta1.DailyMaintenanceWindowH\x00\x12I\n\x10recurring_window\x18\x03 \x01(\x0b\x32-.google.container.v1beta1.RecurringTimeWindowH\x00\x12\x66\n\x16maintenance_exclusions\x18\x04 \x03(\x0b\x32\x46.google.container.v1beta1.MaintenanceWindow.MaintenanceExclusionsEntry\x1a\x62\n\x1aMaintenanceExclusionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.google.container.v1beta1.TimeWindow:\x02\x38\x01\x42\x08\n\x06policy"j\n\nTimeWindow\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"_\n\x13RecurringTimeWindow\x12\x34\n\x06window\x18\x01 \x01(\x0b\x32$.google.container.v1beta1.TimeWindow\x12\x12\n\nrecurrence\x18\x02 
\x01(\t">\n\x16\x44\x61ilyMaintenanceWindow\x12\x12\n\nstart_time\x18\x02 \x01(\t\x12\x10\n\x08\x64uration\x18\x03 \x01(\t"\xd7\x01\n\x1cSetNodePoolManagementRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x1b\n\x0cnode_pool_id\x18\x04 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x41\n\nmanagement\x18\x05 \x01(\x0b\x32(.google.container.v1beta1.NodeManagementB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x07 \x01(\t"\xa7\x01\n\x16SetNodePoolSizeRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x1b\n\x0cnode_pool_id\x18\x04 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x17\n\nnode_count\x18\x05 \x01(\x05\x42\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x07 \x01(\t"\x96\x01\n\x1eRollbackNodePoolUpgradeRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x1b\n\x0cnode_pool_id\x18\x04 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x0c\n\x04name\x18\x06 \x01(\t"O\n\x15ListNodePoolsResponse\x12\x36\n\nnode_pools\x18\x01 \x03(\x0b\x32".google.container.v1beta1.NodePool"\x89\x02\n\x12\x43lusterAutoscaling\x12$\n\x1c\x65nable_node_autoprovisioning\x18\x01 \x01(\x08\x12@\n\x0fresource_limits\x18\x02 \x03(\x0b\x32\'.google.container.v1beta1.ResourceLimit\x12g\n#autoprovisioning_node_pool_defaults\x18\x04 \x01(\x0b\x32:.google.container.v1beta1.AutoprovisioningNodePoolDefaults\x12"\n\x1a\x61utoprovisioning_locations\x18\x05 \x03(\t"Q\n AutoprovisioningNodePoolDefaults\x12\x14\n\x0coauth_scopes\x18\x01 \x03(\t\x12\x17\n\x0fservice_account\x18\x02 \x01(\t"H\n\rResourceLimit\x12\x15\n\rresource_type\x18\x01 \x01(\t\x12\x0f\n\x07minimum\x18\x02 \x01(\x03\x12\x0f\n\x07maximum\x18\x03 
\x01(\x03"o\n\x13NodePoolAutoscaling\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12\x16\n\x0emin_node_count\x18\x02 \x01(\x05\x12\x16\n\x0emax_node_count\x18\x03 \x01(\x05\x12\x17\n\x0f\x61utoprovisioned\x18\x04 \x01(\x08"\xa0\x02\n\x10SetLabelsRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\\\n\x0fresource_labels\x18\x04 \x03(\x0b\x32>.google.container.v1beta1.SetLabelsRequest.ResourceLabelsEntryB\x03\xe0\x41\x02\x12\x1e\n\x11label_fingerprint\x18\x05 \x01(\tB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x07 \x01(\t\x1a\x35\n\x13ResourceLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x85\x01\n\x14SetLegacyAbacRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x14\n\x07\x65nabled\x18\x04 \x01(\x08\x42\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x06 \x01(\t"\x8d\x01\n\x16StartIPRotationRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x0c\n\x04name\x18\x06 \x01(\t\x12\x1a\n\x12rotate_credentials\x18\x07 \x01(\x08"t\n\x19\x43ompleteIPRotationRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x0c\n\x04name\x18\x07 \x01(\t"H\n\x11\x41\x63\x63\x65leratorConfig\x12\x19\n\x11\x61\x63\x63\x65lerator_count\x18\x01 \x01(\x03\x12\x18\n\x10\x61\x63\x63\x65lerator_type\x18\x02 \x01(\t"\xa7\x01\n\x16WorkloadMetadataConfig\x12T\n\rnode_metadata\x18\x01 
\x01(\x0e\x32=.google.container.v1beta1.WorkloadMetadataConfig.NodeMetadata"7\n\x0cNodeMetadata\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\n\n\x06SECURE\x10\x01\x12\n\n\x06\x45XPOSE\x10\x02"\xb8\x01\n\x17SetNetworkPolicyRequest\x12\x19\n\nproject_id\x18\x01 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x13\n\x04zone\x18\x02 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x19\n\ncluster_id\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x44\n\x0enetwork_policy\x18\x04 \x01(\x0b\x32\'.google.container.v1beta1.NetworkPolicyB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x06 \x01(\t"\xbe\x01\n\x1bSetMaintenancePolicyRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\x04zone\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x17\n\ncluster_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12L\n\x12maintenance_policy\x18\x04 \x01(\x0b\x32+.google.container.v1beta1.MaintenancePolicyB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x05 \x01(\t"+\n\x14ListLocationsRequest\x12\x13\n\x06parent\x18\x01 \x01(\tB\x03\xe0\x41\x02"g\n\x15ListLocationsResponse\x12\x35\n\tlocations\x18\x01 \x03(\x0b\x32".google.container.v1beta1.Location\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\xb1\x01\n\x08Location\x12=\n\x04type\x18\x01 \x01(\x0e\x32/.google.container.v1beta1.Location.LocationType\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x13\n\x0brecommended\x18\x03 \x01(\x08"C\n\x0cLocationType\x12\x1d\n\x19LOCATION_TYPE_UNSPECIFIED\x10\x00\x12\x08\n\x04ZONE\x10\x01\x12\n\n\x06REGION\x10\x02"\xef\x01\n\x0fStatusCondition\x12<\n\x04\x63ode\x18\x01 \x01(\x0e\x32..google.container.v1beta1.StatusCondition.Code\x12\x0f\n\x07message\x18\x02 \x01(\t"\x8c\x01\n\x04\x43ode\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x10\n\x0cGCE_STOCKOUT\x10\x01\x12\x1f\n\x1bGKE_SERVICE_ACCOUNT_DELETED\x10\x02\x12\x16\n\x12GCE_QUOTA_EXCEEDED\x10\x03\x12\x13\n\x0fSET_BY_OPERATOR\x10\x04\x12\x17\n\x13\x43LOUD_KMS_KEY_ERROR\x10\x07"Z\n\rNetworkConfig\x12\x0f\n\x07network\x18\x01 \x01(\t\x12\x12\n\nsubnetwork\x18\x02 \x01(\t\x12$\n\x1c\x65nable_intra_node_visibility\x18\x05 
\x01(\x08"j\n\x1cListUsableSubnetworksRequest\x12\x13\n\x06parent\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t"y\n\x1dListUsableSubnetworksResponse\x12?\n\x0bsubnetworks\x18\x01 \x03(\x0b\x32*.google.container.v1beta1.UsableSubnetwork\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x85\x02\n\x1eUsableSubnetworkSecondaryRange\x12\x12\n\nrange_name\x18\x01 \x01(\t\x12\x15\n\rip_cidr_range\x18\x02 \x01(\t\x12O\n\x06status\x18\x03 \x01(\x0e\x32?.google.container.v1beta1.UsableSubnetworkSecondaryRange.Status"g\n\x06Status\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06UNUSED\x10\x01\x12\x12\n\x0eIN_USE_SERVICE\x10\x02\x12\x18\n\x14IN_USE_SHAREABLE_POD\x10\x03\x12\x16\n\x12IN_USE_MANAGED_POD\x10\x04"\xbd\x01\n\x10UsableSubnetwork\x12\x12\n\nsubnetwork\x18\x01 \x01(\t\x12\x0f\n\x07network\x18\x02 \x01(\t\x12\x15\n\rip_cidr_range\x18\x03 \x01(\t\x12U\n\x13secondary_ip_ranges\x18\x04 \x03(\x0b\x32\x38.google.container.v1beta1.UsableSubnetworkSecondaryRange\x12\x16\n\x0estatus_message\x18\x05 \x01(\t")\n\x16VerticalPodAutoscaling\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08",\n\x19IntraNodeVisibilityConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08".\n\x11MaxPodsConstraint\x12\x19\n\x11max_pods_per_node\x18\x01 \x01(\x03"\x9d\x01\n\x12\x44\x61tabaseEncryption\x12\x41\n\x05state\x18\x02 \x01(\x0e\x32\x32.google.container.v1beta1.DatabaseEncryption.State\x12\x10\n\x08key_name\x18\x01 \x01(\t"2\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tENCRYPTED\x10\x01\x12\r\n\tDECRYPTED\x10\x02"\xf7\x02\n\x19ResourceUsageExportConfig\x12\x65\n\x14\x62igquery_destination\x18\x01 \x01(\x0b\x32G.google.container.v1beta1.ResourceUsageExportConfig.BigQueryDestination\x12&\n\x1e\x65nable_network_egress_metering\x18\x02 \x01(\x08\x12r\n\x1b\x63onsumption_metering_config\x18\x03 
\x01(\x0b\x32M.google.container.v1beta1.ResourceUsageExportConfig.ConsumptionMeteringConfig\x1a)\n\x13\x42igQueryDestination\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x1a,\n\x19\x43onsumptionMeteringConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x32\xafI\n\x0e\x43lusterManager\x12\xf3\x01\n\x0cListClusters\x12-.google.container.v1beta1.ListClustersRequest\x1a..google.container.v1beta1.ListClustersResponse"\x83\x01\x82\xd3\xe4\x93\x02k\x12\x31/v1beta1/{parent=projects/*/locations/*}/clustersZ6\x12\x34/v1beta1/projects/{project_id}/zones/{zone}/clusters\xda\x41\x0fproject_id,zone\x12\xfa\x01\n\nGetCluster\x12+.google.container.v1beta1.GetClusterRequest\x1a!.google.container.v1beta1.Cluster"\x9b\x01\x82\xd3\xe4\x93\x02x\x12\x31/v1beta1/{name=projects/*/locations/*/clusters/*}ZC\x12\x41/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}\xda\x41\x1aproject_id,zone,cluster_id\x12\xf8\x01\n\rCreateCluster\x12..google.container.v1beta1.CreateClusterRequest\x1a#.google.container.v1beta1.Operation"\x91\x01\x82\xd3\xe4\x93\x02q"1/v1beta1/{parent=projects/*/locations/*}/clusters:\x01*Z9"4/v1beta1/projects/{project_id}/zones/{zone}/clusters:\x01*\xda\x41\x17project_id,zone,cluster\x12\x8f\x02\n\rUpdateCluster\x12..google.container.v1beta1.UpdateClusterRequest\x1a#.google.container.v1beta1.Operation"\xa8\x01\x82\xd3\xe4\x93\x02~\x1a\x31/v1beta1/{name=projects/*/locations/*/clusters/*}:\x01*ZF\x1a\x41/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:\x01*\xda\x41!project_id,zone,cluster_id,update\x12\x9a\x02\n\x0eUpdateNodePool\x12/.google.container.v1beta1.UpdateNodePoolRequest\x1a#.google.container.v1beta1.Operation"\xb1\x01\x82\xd3\xe4\x93\x02\xaa\x01\x1a=/v1beta1/{name=projects/*/locations/*/clusters/*/nodePools/*}:\x01*Zf"a/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/update:\x01*\x12\xbe\x02\n\x16SetNodePoolAutoscaling\x12\x37.google.container.v1beta1.SetNodePoolAutoscalingRequest\x1a#.google.container.
v1beta1.Operation"\xc5\x01\x82\xd3\xe4\x93\x02\xbe\x01"L/v1beta1/{name=projects/*/locations/*/clusters/*/nodePools/*}:setAutoscaling:\x01*Zk"f/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/autoscaling:\x01*\x12\xb4\x02\n\x11SetLoggingService\x12\x32.google.container.v1beta1.SetLoggingServiceRequest\x1a#.google.container.v1beta1.Operation"\xc5\x01\x82\xd3\xe4\x93\x02\x91\x01"/v1beta1/{name=projects/*/locations/*/clusters/*}:setLocations:\x01*ZP"K/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/locations:\x01*\xda\x41$project_id,zone,cluster_id,locations\x12\xaa\x02\n\x0cUpdateMaster\x12-.google.container.v1beta1.UpdateMasterRequest\x1a#.google.container.v1beta1.Operation"\xc5\x01\x82\xd3\xe4\x93\x02\x92\x01">/v1beta1/{name=projects/*/locations/*/clusters/*}:updateMaster:\x01*ZM"H/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/master:\x01*\xda\x41)project_id,zone,cluster_id,master_version\x12\x88\x02\n\rSetMasterAuth\x12..google.container.v1beta1.SetMasterAuthRequest\x1a#.google.container.v1beta1.Operation"\xa1\x01\x82\xd3\xe4\x93\x02\x9a\x01"?/v1beta1/{name=projects/*/locations/*/clusters/*}:setMasterAuth:\x01*ZT"O/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:setMasterAuth:\x01*\x12\x82\x02\n\rDeleteCluster\x12..google.container.v1beta1.DeleteClusterRequest\x1a#.google.container.v1beta1.Operation"\x9b\x01\x82\xd3\xe4\x93\x02x*1/v1beta1/{name=projects/*/locations/*/clusters/*}ZC*A/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}\xda\x41\x1aproject_id,zone,cluster_id\x12\xfd\x01\n\x0eListOperations\x12/.google.container.v1beta1.ListOperationsRequest\x1a\x30.google.container.v1beta1.ListOperationsResponse"\x87\x01\x82\xd3\xe4\x93\x02o\x12\x33/v1beta1/{parent=projects/*/locations/*}/operationsZ8\x12\x36/v1beta1/projects/{project_id}/zones/{zone}/operations\xda\x41\x0fproject_id,zone\x12\x88\x02\n\x0cGetOperation\x12-.google.container.v1beta1.GetOperationRequ
est\x1a#.google.container.v1beta1.Operation"\xa3\x01\x82\xd3\xe4\x93\x02~\x12\x33/v1beta1/{name=projects/*/locations/*/operations/*}ZG\x12\x45/v1beta1/projects/{project_id}/zones/{zone}/operations/{operation_id}\xda\x41\x1cproject_id,zone,operation_id\x12\x96\x02\n\x0f\x43\x61ncelOperation\x12\x30.google.container.v1beta1.CancelOperationRequest\x1a\x16.google.protobuf.Empty"\xb8\x01\x82\xd3\xe4\x93\x02\x92\x01":/v1beta1/{name=projects/*/locations/*/operations/*}:cancel:\x01*ZQ"L/v1beta1/projects/{project_id}/zones/{zone}/operations/{operation_id}:cancel:\x01*\xda\x41\x1cproject_id,zone,operation_id\x12\xf7\x01\n\x0fGetServerConfig\x12\x30.google.container.v1beta1.GetServerConfigRequest\x1a&.google.container.v1beta1.ServerConfig"\x89\x01\x82\xd3\xe4\x93\x02q\x12\x33/v1beta1/{name=projects/*/locations/*}/serverConfigZ:\x12\x38/v1beta1/projects/{project_id}/zones/{zone}/serverconfig\xda\x41\x0fproject_id,zone\x12\xa5\x02\n\rListNodePools\x12..google.container.v1beta1.ListNodePoolsRequest\x1a/.google.container.v1beta1.ListNodePoolsResponse"\xb2\x01\x82\xd3\xe4\x93\x02\x8e\x01\x12=/v1beta1/{parent=projects/*/locations/*/clusters/*}/nodePoolsZM\x12K/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools\xda\x41\x1aproject_id,zone,cluster_id\x12\xb0\x02\n\x0bGetNodePool\x12,.google.container.v1beta1.GetNodePoolRequest\x1a".google.container.v1beta1.NodePool"\xce\x01\x82\xd3\xe4\x93\x02\x9d\x01\x12=/v1beta1/{name=projects/*/locations/*/clusters/*/nodePools/*}Z\\\x12Z/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}\xda\x41\'project_id,zone,cluster_id,node_pool_id\x12\xab\x02\n\x0e\x43reateNodePool\x12/.google.container.v1beta1.CreateNodePoolRequest\x1a#.google.container.v1beta1.Operation"\xc2\x01\x82\xd3\xe4\x93\x02\x94\x01"=/v1beta1/{parent=projects/*/locations/*/clusters/*}/nodePools:\x01*ZP"K/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools:\x01*\xda\x41$project_id,zone,cluster_id,node_
pool\x12\xb7\x02\n\x0e\x44\x65leteNodePool\x12/.google.container.v1beta1.DeleteNodePoolRequest\x1a#.google.container.v1beta1.Operation"\xce\x01\x82\xd3\xe4\x93\x02\x9d\x01*=/v1beta1/{name=projects/*/locations/*/clusters/*/nodePools/*}Z\\*Z/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}\xda\x41\'project_id,zone,cluster_id,node_pool_id\x12\xe1\x02\n\x17RollbackNodePoolUpgrade\x12\x38.google.container.v1beta1.RollbackNodePoolUpgradeRequest\x1a#.google.container.v1beta1.Operation"\xe6\x01\x82\xd3\xe4\x93\x02\xb5\x01"F/v1beta1/{name=projects/*/locations/*/clusters/*/nodePools/*}:rollback:\x01*Zh"c/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}:rollback:\x01*\xda\x41\'project_id,zone,cluster_id,node_pool_id\x12\xf2\x02\n\x15SetNodePoolManagement\x12\x36.google.container.v1beta1.SetNodePoolManagementRequest\x1a#.google.container.v1beta1.Operation"\xfb\x01\x82\xd3\xe4\x93\x02\xbf\x01"K/v1beta1/{name=projects/*/locations/*/clusters/*/nodePools/*}:setManagement:\x01*Zm"h/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/setManagement:\x01*\xda\x41\x32project_id,zone,cluster_id,node_pool_id,management\x12\xc4\x02\n\tSetLabels\x12*.google.container.v1beta1.SetLabelsRequest\x1a#.google.container.v1beta1.Operation"\xe5\x01\x82\xd3\xe4\x93\x02\x9f\x01"C/v1beta1/{name=projects/*/locations/*/clusters/*}:setResourceLabels:\x01*ZU"P/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/resourceLabels:\x01*\xda\x41`__ (e.g. - ``n1-standard-1``). If unspecified, the default machine type - is ``n1-standard-1``. - disk_size_gb: - Size of the disk attached to each node, specified in GB. The - smallest allowed disk size is 10GB. If unspecified, the - default disk size is 100GB. - oauth_scopes: - The set of Google API scopes to be made available on all of - the node VMs under the “default” service account. 
The - following scopes are recommended, but not required, and by - default are not included: - - ``https://www.googleapis.com/auth/compute`` is required for - mounting persistent storage on your nodes. - - ``https://www.googleapis.com/auth/devstorage.read_only`` is - required for communicating with **gcr.io** (the `Google - Container Registry `__). If unspecified, no scopes are added, unless - Cloud Logging or Cloud Monitoring are enabled, in which case - their required scopes will be added. - service_account: - The Google Cloud Platform Service Account to be used by the - node VMs. If no Service Account is specified, the “default” - service account is used. - metadata: - The metadata key/value pairs assigned to instances in the - cluster. Keys must conform to the regexp [a-zA-Z0-9-_]+ and - be less than 128 bytes in length. These are reflected as part - of a URL in the metadata server. Additionally, to avoid - ambiguity, keys must not conflict with any other metadata keys - for the project or be one of the reserved keys: “cluster- - location” “cluster-name” “cluster-uid” “configure-sh” - “containerd-configure-sh” “enable-oslogin” “gci-ensure-gke- - docker” “gci-metrics-enabled” “gci-update-strategy” “instance- - template” “kube-env” “startup-script” “user-data” “disable- - address-manager” “windows-startup-script-ps1” “common-psm1” - “k8s-node-setup-psm1” “install-ssh-psm1” “user-profile-psm1” - “serial-port-logging-enable” Values are free-form strings, and - only have meaning as interpreted by the image running in the - instance. The only restriction placed on them is that each - value’s size must be less than or equal to 32 KB. The total - size of all keys and values must be less than 512 KB. - image_type: - The image type to use for this node. Note that for a given - image type, the latest version of it will be used. - labels: - The map of Kubernetes labels (key/value pairs) to be applied - to each node. 
These will added in addition to any default - label(s) that Kubernetes may apply to the node. In case of - conflict in label keys, the applied set may differ depending - on the Kubernetes version – it’s best to assume the behavior - is undefined and conflicts should be avoided. For more - information, including usage and the valid values, see: - https://kubernetes.io/docs/concepts/overview/working-with- - objects/labels/ - local_ssd_count: - The number of local SSD disks to be attached to the node. The - limit for this value is dependent upon the maximum number of - disks available on a machine per zone. See: - https://cloud.google.com/compute/docs/disks/local-ssd for more - information. - tags: - The list of instance tags applied to all nodes. Tags are used - to identify valid sources or targets for network firewalls and - are specified by the client during cluster or node pool - creation. Each tag within the list must comply with RFC1035. - preemptible: - Whether the nodes are created as preemptible VM instances. - See: - https://cloud.google.com/compute/docs/instances/preemptible - for more inforamtion about preemptible VM instances. - accelerators: - A list of hardware accelerators to be attached to each node. - See https://cloud.google.com/compute/docs/gpus for more - information about support for GPUs. - disk_type: - Type of the disk attached to each node (e.g. ‘pd-standard’ or - ‘pd-ssd’) If unspecified, the default disk type is ‘pd- - standard’ - min_cpu_platform: - Minimum CPU platform to be used by this instance. The instance - may be scheduled on the specified or newer CPU platform. - Applicable values are the friendly names of CPU platforms, - such as minCpuPlatform: “Intel Haswell” or minCpuPlatform: - “Intel Sandy Bridge”. For more information, read `how to - specify min CPU platform - `__ To unset the min cpu platform field pass - “automatic” as field value. - workload_metadata_config: - The workload metadata configuration for this node. 
- taints: - List of kubernetes taints to be applied to each node. For - more information, including usage and the valid values, see: - https://kubernetes.io/docs/concepts/configuration/taint-and- - toleration/ - shielded_instance_config: - Shielded Instance options. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.NodeConfig) - }, -) -_sym_db.RegisterMessage(NodeConfig) -_sym_db.RegisterMessage(NodeConfig.MetadataEntry) -_sym_db.RegisterMessage(NodeConfig.LabelsEntry) - -ShieldedInstanceConfig = _reflection.GeneratedProtocolMessageType( - "ShieldedInstanceConfig", - (_message.Message,), - { - "DESCRIPTOR": _SHIELDEDINSTANCECONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """A set of Shielded Instance options. - - Attributes: - enable_secure_boot: - Defines whether the instance has Secure Boot enabled. Secure - Boot helps ensure that the system only runs authentic software - by verifying the digital signature of all boot components, and - halting the boot process if signature verification fails. - enable_integrity_monitoring: - Defines whether the instance has integrity monitoring enabled. - Enables monitoring and attestation of the boot integrity of - the instance. The attestation is performed against the - integrity policy baseline. This baseline is initially derived - from the implicitly trusted boot image when the instance is - created. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ShieldedInstanceConfig) - }, -) -_sym_db.RegisterMessage(ShieldedInstanceConfig) - -NodeTaint = _reflection.GeneratedProtocolMessageType( - "NodeTaint", - (_message.Message,), - { - "DESCRIPTOR": _NODETAINT, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Kubernetes taint is comprised of three fields: key, value, and effect. - Effect can only be one of three types: NoSchedule, PreferNoSchedule or - NoExecute. 
For more information, including usage and the valid - values, see: https://kubernetes.io/docs/concepts/configuration/taint- - and-toleration/ - - Attributes: - key: - Key for taint. - value: - Value for taint. - effect: - Effect for taint. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.NodeTaint) - }, -) -_sym_db.RegisterMessage(NodeTaint) - -MasterAuth = _reflection.GeneratedProtocolMessageType( - "MasterAuth", - (_message.Message,), - { - "DESCRIPTOR": _MASTERAUTH, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """The authentication information for accessing the master endpoint. - Authentication can be done using HTTP basic auth or using client - certificates. - - Attributes: - username: - The username to use for HTTP basic authentication to the - master endpoint. For clusters v1.6.0 and later, basic - authentication can be disabled by leaving username unspecified - (or setting it to the empty string). - password: - The password to use for HTTP basic authentication to the - master endpoint. Because the master endpoint is open to the - Internet, you should create a strong password. If a password - is provided for cluster creation, username must be non-empty. - client_certificate_config: - Configuration for client certificate authentication on the - cluster. For clusters before v1.12, if no configuration is - specified, a client certificate is issued. - cluster_ca_certificate: - [Output only] Base64-encoded public certificate that is the - root of trust for the cluster. - client_certificate: - [Output only] Base64-encoded public certificate used by - clients to authenticate to the cluster endpoint. - client_key: - [Output only] Base64-encoded private key used by clients to - authenticate to the cluster endpoint. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.MasterAuth) - }, -) -_sym_db.RegisterMessage(MasterAuth) - -ClientCertificateConfig = _reflection.GeneratedProtocolMessageType( - "ClientCertificateConfig", - (_message.Message,), - { - "DESCRIPTOR": _CLIENTCERTIFICATECONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration for client certificates on the cluster. - - Attributes: - issue_client_certificate: - Issue a client certificate. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ClientCertificateConfig) - }, -) -_sym_db.RegisterMessage(ClientCertificateConfig) - -AddonsConfig = _reflection.GeneratedProtocolMessageType( - "AddonsConfig", - (_message.Message,), - { - "DESCRIPTOR": _ADDONSCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration for the addons that can be automatically spun up in the - cluster, enabling additional functionality. - - Attributes: - http_load_balancing: - Configuration for the HTTP (L7) load balancing controller - addon, which makes it easy to set up HTTP load balancers for - services in a cluster. - horizontal_pod_autoscaling: - Configuration for the horizontal pod autoscaling feature, - which increases or decreases the number of replica pods a - replication controller has based on the resource usage of the - existing pods. - kubernetes_dashboard: - Configuration for the Kubernetes Dashboard. This addon is - deprecated, and will be disabled in 1.15. It is recommended to - use the Cloud Console to manage and monitor your Kubernetes - clusters, workloads and applications. For more information, - see: https://cloud.google.com/kubernetes- - engine/docs/concepts/dashboards - network_policy_config: - Configuration for NetworkPolicy. This only tracks whether the - addon is enabled or not on the Master, it does not track - whether network policy is enabled for the nodes. 
- istio_config: - Configuration for Istio, an open platform to connect, manage, - and secure microservices. - cloud_run_config: - Configuration for the Cloud Run addon. The ``IstioConfig`` - addon must be enabled in order to enable Cloud Run addon. This - option can only be enabled at cluster creation time. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.AddonsConfig) - }, -) -_sym_db.RegisterMessage(AddonsConfig) - -HttpLoadBalancing = _reflection.GeneratedProtocolMessageType( - "HttpLoadBalancing", - (_message.Message,), - { - "DESCRIPTOR": _HTTPLOADBALANCING, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration options for the HTTP (L7) load balancing controller - addon, which makes it easy to set up HTTP load balancers for services - in a cluster. - - Attributes: - disabled: - Whether the HTTP Load Balancing controller is enabled in the - cluster. When enabled, it runs a small pod in the cluster that - manages the load balancers. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.HttpLoadBalancing) - }, -) -_sym_db.RegisterMessage(HttpLoadBalancing) - -HorizontalPodAutoscaling = _reflection.GeneratedProtocolMessageType( - "HorizontalPodAutoscaling", - (_message.Message,), - { - "DESCRIPTOR": _HORIZONTALPODAUTOSCALING, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration options for the horizontal pod autoscaling feature, - which increases or decreases the number of replica pods a replication - controller has based on the resource usage of the existing pods. - - Attributes: - disabled: - Whether the Horizontal Pod Autoscaling feature is enabled in - the cluster. When enabled, it ensures that a Heapster pod is - running in the cluster, which is also used by the Cloud - Monitoring service. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.HorizontalPodAutoscaling) - }, -) -_sym_db.RegisterMessage(HorizontalPodAutoscaling) - -KubernetesDashboard = _reflection.GeneratedProtocolMessageType( - "KubernetesDashboard", - (_message.Message,), - { - "DESCRIPTOR": _KUBERNETESDASHBOARD, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration for the Kubernetes Dashboard. - - Attributes: - disabled: - Whether the Kubernetes Dashboard is enabled for this cluster. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.KubernetesDashboard) - }, -) -_sym_db.RegisterMessage(KubernetesDashboard) - -NetworkPolicyConfig = _reflection.GeneratedProtocolMessageType( - "NetworkPolicyConfig", - (_message.Message,), - { - "DESCRIPTOR": _NETWORKPOLICYCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration for NetworkPolicy. This only tracks whether the addon is - enabled or not on the Master, it does not track whether network policy - is enabled for the nodes. - - Attributes: - disabled: - Whether NetworkPolicy is enabled for this cluster. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.NetworkPolicyConfig) - }, -) -_sym_db.RegisterMessage(NetworkPolicyConfig) - -PrivateClusterConfig = _reflection.GeneratedProtocolMessageType( - "PrivateClusterConfig", - (_message.Message,), - { - "DESCRIPTOR": _PRIVATECLUSTERCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration options for private clusters. - - Attributes: - enable_private_nodes: - Whether nodes have internal IP addresses only. If enabled, all - nodes are given only RFC 1918 private addresses and - communicate with the master via private networking. - enable_private_endpoint: - Whether the master’s internal IP address is used as the - cluster endpoint. 
- master_ipv4_cidr_block: - The IP range in CIDR notation to use for the hosted master - network. This range will be used for assigning internal IP - addresses to the master or set of masters, as well as the ILB - VIP. This range must not overlap with any other ranges in use - within the cluster’s network. - private_endpoint: - Output only. The internal IP address of this cluster’s master - endpoint. - public_endpoint: - Output only. The external IP address of this cluster’s master - endpoint. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.PrivateClusterConfig) - }, -) -_sym_db.RegisterMessage(PrivateClusterConfig) - -IstioConfig = _reflection.GeneratedProtocolMessageType( - "IstioConfig", - (_message.Message,), - { - "DESCRIPTOR": _ISTIOCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration options for Istio addon. - - Attributes: - disabled: - Whether Istio is enabled for this cluster. - auth: - The specified Istio auth mode, either none, or mutual TLS. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.IstioConfig) - }, -) -_sym_db.RegisterMessage(IstioConfig) - -CloudRunConfig = _reflection.GeneratedProtocolMessageType( - "CloudRunConfig", - (_message.Message,), - { - "DESCRIPTOR": _CLOUDRUNCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration options for the Cloud Run feature. - - Attributes: - disabled: - Whether Cloud Run addon is enabled for this cluster. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.CloudRunConfig) - }, -) -_sym_db.RegisterMessage(CloudRunConfig) - -MasterAuthorizedNetworksConfig = _reflection.GeneratedProtocolMessageType( - "MasterAuthorizedNetworksConfig", - (_message.Message,), - { - "CidrBlock": _reflection.GeneratedProtocolMessageType( - "CidrBlock", - (_message.Message,), - { - "DESCRIPTOR": _MASTERAUTHORIZEDNETWORKSCONFIG_CIDRBLOCK, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """CidrBlock contains an optional name and one CIDR block. - - Attributes: - display_name: - display_name is an optional field for users to identify CIDR - blocks. - cidr_block: - cidr_block must be specified in CIDR notation. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.MasterAuthorizedNetworksConfig.CidrBlock) - }, - ), - "DESCRIPTOR": _MASTERAUTHORIZEDNETWORKSCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration options for the master authorized networks feature. - Enabled master authorized networks will disallow all external traffic - to access Kubernetes master through HTTPS except traffic from the - given CIDR blocks, Google Compute Engine Public IPs and Google Prod - IPs. - - Attributes: - enabled: - Whether or not master authorized networks is enabled. - cidr_blocks: - cidr_blocks define up to 10 external networks that could - access Kubernetes master through HTTPS. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.MasterAuthorizedNetworksConfig) - }, -) -_sym_db.RegisterMessage(MasterAuthorizedNetworksConfig) -_sym_db.RegisterMessage(MasterAuthorizedNetworksConfig.CidrBlock) - -LegacyAbac = _reflection.GeneratedProtocolMessageType( - "LegacyAbac", - (_message.Message,), - { - "DESCRIPTOR": _LEGACYABAC, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration for the legacy Attribute Based Access Control - authorization mode. - - Attributes: - enabled: - Whether the ABAC authorizer is enabled for this cluster. When - enabled, identities in the system, including service accounts, - nodes, and controllers, will have statically granted - permissions beyond those provided by the RBAC configuration or - IAM. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.LegacyAbac) - }, -) -_sym_db.RegisterMessage(LegacyAbac) - -NetworkPolicy = _reflection.GeneratedProtocolMessageType( - "NetworkPolicy", - (_message.Message,), - { - "DESCRIPTOR": _NETWORKPOLICY, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration options for the NetworkPolicy feature. - https://kubernetes.io/docs/concepts/services- - networking/networkpolicies/ - - Attributes: - provider: - The selected network policy provider. - enabled: - Whether network policy is enabled on the cluster. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.NetworkPolicy) - }, -) -_sym_db.RegisterMessage(NetworkPolicy) - -IPAllocationPolicy = _reflection.GeneratedProtocolMessageType( - "IPAllocationPolicy", - (_message.Message,), - { - "DESCRIPTOR": _IPALLOCATIONPOLICY, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration for controlling how IPs are allocated in the cluster. - - Attributes: - use_ip_aliases: - Whether alias IPs will be used for pod IPs in the cluster. 
- create_subnetwork: - Whether a new subnetwork will be created automatically for the - cluster. This field is only applicable when - ``use_ip_aliases`` is true. - subnetwork_name: - A custom subnetwork name to be used if ``create_subnetwork`` - is true. If this field is empty, then an automatic name will - be chosen for the new subnetwork. - cluster_ipv4_cidr: - This field is deprecated, use cluster_ipv4_cidr_block. - node_ipv4_cidr: - This field is deprecated, use node_ipv4_cidr_block. - services_ipv4_cidr: - This field is deprecated, use services_ipv4_cidr_block. - cluster_secondary_range_name: - The name of the secondary range to be used for the cluster - CIDR block. The secondary range will be used for pod IP - addresses. This must be an existing secondary range associated - with the cluster subnetwork. This field is only applicable - with use_ip_aliases and create_subnetwork is false. - services_secondary_range_name: - The name of the secondary range to be used as for the services - CIDR block. The secondary range will be used for service - ClusterIPs. This must be an existing secondary range - associated with the cluster subnetwork. This field is only - applicable with use_ip_aliases and create_subnetwork is false. - cluster_ipv4_cidr_block: - The IP address range for the cluster pod IPs. If this field is - set, then ``cluster.cluster_ipv4_cidr`` must be left blank. - This field is only applicable when ``use_ip_aliases`` is true. - Set to blank to have a range chosen with the default size. - Set to /netmask (e.g. ``/14``) to have a range chosen with a - specific netmask. Set to a `CIDR - `__ notation (e.g. ``10.96.0.0/14``) from the - RFC-1918 private networks (e.g. ``10.0.0.0/8``, - ``172.16.0.0/12``, ``192.168.0.0/16``) to pick a specific - range to use. - node_ipv4_cidr_block: - The IP address range of the instance IPs in this cluster. - This is applicable only if ``create_subnetwork`` is true. Set - to blank to have a range chosen with the default size. 
Set to - /netmask (e.g. ``/14``) to have a range chosen with a specific - netmask. Set to a `CIDR - `__ notation (e.g. ``10.96.0.0/14``) from the - RFC-1918 private networks (e.g. ``10.0.0.0/8``, - ``172.16.0.0/12``, ``192.168.0.0/16``) to pick a specific - range to use. - services_ipv4_cidr_block: - The IP address range of the services IPs in this cluster. If - blank, a range will be automatically chosen with the default - size. This field is only applicable when ``use_ip_aliases`` - is true. Set to blank to have a range chosen with the default - size. Set to /netmask (e.g. ``/14``) to have a range chosen - with a specific netmask. Set to a `CIDR - `__ notation (e.g. ``10.96.0.0/14``) from the - RFC-1918 private networks (e.g. ``10.0.0.0/8``, - ``172.16.0.0/12``, ``192.168.0.0/16``) to pick a specific - range to use. - allow_route_overlap: - If true, allow allocation of cluster CIDR ranges that overlap - with certain kinds of network routes. By default we do not - allow cluster CIDR ranges to intersect with any user declared - routes. With allow_route_overlap == true, we allow overlapping - with CIDR ranges that are larger than the cluster CIDR range. - If this field is set to true, then cluster and services CIDRs - must be fully-specified (e.g. ``10.96.0.0/14``, but not - ``/14``), which means: 1) When ``use_ip_aliases`` is true, - ``cluster_ipv4_cidr_block`` and ``services_ipv4_cidr_block`` - must be fully-specified. 2) When ``use_ip_aliases`` is false, - ``cluster.cluster_ipv4_cidr`` muse be fully-specified. - tpu_ipv4_cidr_block: - The IP address range of the Cloud TPUs in this cluster. If - unspecified, a range will be automatically chosen with the - default size. This field is only applicable when - ``use_ip_aliases`` is true. If unspecified, the range will - use the default size. Set to /netmask (e.g. ``/14``) to have - a range chosen with a specific netmask. Set to a `CIDR - `__ notation (e.g. ``10.96.0.0/14``) from the - RFC-1918 private networks (e.g. 
``10.0.0.0/8``, - ``172.16.0.0/12``, ``192.168.0.0/16``) to pick a specific - range to use. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.IPAllocationPolicy) - }, -) -_sym_db.RegisterMessage(IPAllocationPolicy) - -BinaryAuthorization = _reflection.GeneratedProtocolMessageType( - "BinaryAuthorization", - (_message.Message,), - { - "DESCRIPTOR": _BINARYAUTHORIZATION, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration for Binary Authorization. - - Attributes: - enabled: - Enable Binary Authorization for this cluster. If enabled, all - container images will be validated by Google Binauthz. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.BinaryAuthorization) - }, -) -_sym_db.RegisterMessage(BinaryAuthorization) - -PodSecurityPolicyConfig = _reflection.GeneratedProtocolMessageType( - "PodSecurityPolicyConfig", - (_message.Message,), - { - "DESCRIPTOR": _PODSECURITYPOLICYCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration for the PodSecurityPolicy feature. - - Attributes: - enabled: - Enable the PodSecurityPolicy controller for this cluster. If - enabled, pods must be valid under a PodSecurityPolicy to be - created. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.PodSecurityPolicyConfig) - }, -) -_sym_db.RegisterMessage(PodSecurityPolicyConfig) - -AuthenticatorGroupsConfig = _reflection.GeneratedProtocolMessageType( - "AuthenticatorGroupsConfig", - (_message.Message,), - { - "DESCRIPTOR": _AUTHENTICATORGROUPSCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration for returning group information from authenticators. - - Attributes: - enabled: - Whether this cluster should return group membership lookups - during authentication using a group of security groups. 
- security_group: - The name of the security group-of-groups to be used. Only - relevant if enabled = true. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.AuthenticatorGroupsConfig) - }, -) -_sym_db.RegisterMessage(AuthenticatorGroupsConfig) - -Cluster = _reflection.GeneratedProtocolMessageType( - "Cluster", - (_message.Message,), - { - "ResourceLabelsEntry": _reflection.GeneratedProtocolMessageType( - "ResourceLabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTER_RESOURCELABELSENTRY, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2" - # @@protoc_insertion_point(class_scope:google.container.v1beta1.Cluster.ResourceLabelsEntry) - }, - ), - "DESCRIPTOR": _CLUSTER, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """A Google Kubernetes Engine cluster. - - Attributes: - name: - The name of this cluster. The name must be unique within this - project and location (e.g. zone or region), and can be up to - 40 characters with the following restrictions: - Lowercase - letters, numbers, and hyphens only. - Must start with a - letter. - Must end with a number or a letter. - description: - An optional description of this cluster. - initial_node_count: - The number of nodes to create in this cluster. You must ensure - that your Compute Engine `resource quota - `__ is sufficient for - this number of instances. You must also have available - firewall and routes quota. For requests, this field should - only be used in lieu of a “node_pool” object, since this - configuration (along with the “node_config”) will be used to - create a “NodePool” object with an auto-generated name. Do not - use this and a node_pool at the same time. This field is - deprecated, use node_pool.initial_node_count instead. - node_config: - Parameters used in creating the cluster’s nodes. 
For requests, - this field should only be used in lieu of a “node_pool” - object, since this configuration (along with the - “initial_node_count”) will be used to create a “NodePool” - object with an auto-generated name. Do not use this and a - node_pool at the same time. For responses, this field will be - populated with the node configuration of the first node pool. - (For configuration of each node pool, see - ``node_pool.config``) If unspecified, the defaults are used. - This field is deprecated, use node_pool.config instead. - master_auth: - The authentication information for accessing the master - endpoint. If unspecified, the defaults are used: For clusters - before v1.12, if master_auth is unspecified, ``username`` will - be set to “admin”, a random password will be generated, and a - client certificate will be issued. - logging_service: - The logging service the cluster should use to write logs. - Currently available options: - ``logging.googleapis.com`` - - the Google Cloud Logging service. - ``none`` - no logs will - be exported from the cluster. - if left as an empty string,\ - ``logging.googleapis.com`` will be used. - monitoring_service: - The monitoring service the cluster should use to write - metrics. Currently available options: - - ``monitoring.googleapis.com`` - the Google Cloud Monitoring - service. - ``none`` - no metrics will be exported from the - cluster. - if left as an empty string, - ``monitoring.googleapis.com`` will be used. - network: - The name of the Google Compute Engine `network - `__ to which the cluster is connected. If - left unspecified, the ``default`` network will be used. On - output this shows the network ID instead of the name. - cluster_ipv4_cidr: - The IP address range of the container pods in this cluster, in - `CIDR `__ notation (e.g. ``10.96.0.0/14``). Leave - blank to have one automatically chosen or specify a ``/14`` - block in ``10.0.0.0/8``. 
- addons_config: - Configurations for the various addons available to run in the - cluster. - subnetwork: - The name of the Google Compute Engine `subnetwork - `__ to - which the cluster is connected. On output this shows the - subnetwork ID instead of the name. - node_pools: - The node pools associated with this cluster. This field should - not be set if “node_config” or “initial_node_count” are - specified. - locations: - The list of Google Compute Engine `zones - `__ in - which the cluster’s nodes should be located. - enable_kubernetes_alpha: - Kubernetes alpha features are enabled on this cluster. This - includes alpha API groups (e.g. v1beta1) and features that may - not be production ready in the kubernetes version of the - master and nodes. The cluster has no SLA for uptime and - master/node upgrades are disabled. Alpha enabled clusters are - automatically deleted thirty days after creation. - resource_labels: - The resource labels for the cluster to use to annotate any - related Google Compute Engine resources. - label_fingerprint: - The fingerprint of the set of labels for this cluster. - legacy_abac: - Configuration for the legacy ABAC authorization mode. - network_policy: - Configuration options for the NetworkPolicy feature. - ip_allocation_policy: - Configuration for cluster IP allocation. - master_authorized_networks_config: - The configuration options for master authorized networks - feature. - maintenance_policy: - Configure the maintenance policy for this cluster. - binary_authorization: - Configuration for Binary Authorization. - pod_security_policy_config: - Configuration for the PodSecurityPolicy feature. - autoscaling: - Cluster-level autoscaling configuration. - network_config: - Configuration for cluster networking. - private_cluster: - If this is a private cluster setup. Private clusters are - clusters that, by default have no external IP addresses on the - nodes and where nodes and the master communicate over private - IP addresses. 
This field is deprecated, use - private_cluster_config.enable_private_nodes instead. - master_ipv4_cidr_block: - The IP prefix in CIDR notation to use for the hosted master - network. This prefix will be used for assigning private IP - addresses to the master or set of masters, as well as the ILB - VIP. This field is deprecated, use - private_cluster_config.master_ipv4_cidr_block instead. - default_max_pods_constraint: - The default constraint on the maximum number of pods that can - be run simultaneously on a node in the node pool of this - cluster. Only honored if cluster created with IP Alias - support. - resource_usage_export_config: - Configuration for exporting resource usages. Resource usage - export is disabled when this config unspecified. - authenticator_groups_config: - Configuration controlling RBAC group membership information. - private_cluster_config: - Configuration for private cluster. - vertical_pod_autoscaling: - Cluster-level Vertical Pod Autoscaling configuration. - self_link: - [Output only] Server-defined URL for the resource. - zone: - [Output only] The name of the Google Compute Engine `zone - `__ in - which the cluster resides. This field is deprecated, use - location instead. - endpoint: - [Output only] The IP address of this cluster’s master - endpoint. The endpoint can be accessed from the internet at - ``https://username:password@endpoint/``. See the - ``masterAuth`` property of this resource for username and - password information. - initial_cluster_version: - The initial Kubernetes version for this cluster. Valid - versions are those found in validMasterVersions returned by - getServerConfig. The version can be upgraded over time; such - upgrades are reflected in currentMasterVersion and - currentNodeVersion. 
Users may specify either explicit - versions offered by Kubernetes Engine or version aliases, - which have the following behavior: - “latest”: picks the - highest valid Kubernetes version - “1.X”: picks the highest - valid patch+gke.N patch in the 1.X version - “1.X.Y”: picks - the highest valid gke.N patch in the 1.X.Y version - - “1.X.Y-gke.N”: picks an explicit Kubernetes version - "“,”-": - picks the default Kubernetes version - current_master_version: - [Output only] The current software version of the master - endpoint. - current_node_version: - [Output only] Deprecated, use `NodePool.version - `__ instead. - The current version of the node software components. If they - are currently at multiple versions because they’re in the - process of being upgraded, this reflects the minimum version - of all nodes. - create_time: - [Output only] The time the cluster was created, in `RFC3339 - `__ text format. - status: - [Output only] The current status of this cluster. - status_message: - [Output only] Additional information about the current status - of this cluster, if available. - node_ipv4_cidr_size: - [Output only] The size of the address space on each node for - hosting containers. This is provisioned from within the - ``container_ipv4_cidr`` range. This field will only be set - when cluster is in route-based network mode. - services_ipv4_cidr: - [Output only] The IP address range of the Kubernetes services - in this cluster, in `CIDR - `__ notation (e.g. ``1.2.3.4/29``). Service - addresses are typically put in the last ``/16`` from the - container CIDR. - instance_group_urls: - Deprecated. Use node_pools.instance_group_urls. - current_node_count: - [Output only] The number of nodes currently in the cluster. - Deprecated. Call Kubernetes API directly to retrieve node - information. - expire_time: - [Output only] The time the cluster will be automatically - deleted in `RFC3339 `__ - text format. 
- location: - [Output only] The name of the Google Compute Engine `zone - `__ or `region - `__ in which the cluster resides. - enable_tpu: - Enable the ability to use Cloud TPUs in this cluster. - tpu_ipv4_cidr_block: - [Output only] The IP address range of the Cloud TPUs in this - cluster, in `CIDR - `__ notation (e.g. ``1.2.3.4/29``). - database_encryption: - Configuration of etcd encryption. - conditions: - Which conditions caused the current cluster state. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.Cluster) - }, -) -_sym_db.RegisterMessage(Cluster) -_sym_db.RegisterMessage(Cluster.ResourceLabelsEntry) - -ClusterUpdate = _reflection.GeneratedProtocolMessageType( - "ClusterUpdate", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERUPDATE, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """ClusterUpdate describes an update to the cluster. Exactly one update - can be applied to a cluster with each request, so at most one field - can be provided. - - Attributes: - desired_node_version: - The Kubernetes version to change the nodes to (typically an - upgrade). Users may specify either explicit versions offered - by Kubernetes Engine or version aliases, which have the - following behavior: - “latest”: picks the highest valid - Kubernetes version - “1.X”: picks the highest valid - patch+gke.N patch in the 1.X version - “1.X.Y”: picks the - highest valid gke.N patch in the 1.X.Y version - - “1.X.Y-gke.N”: picks an explicit Kubernetes version - “-”: - picks the Kubernetes master version - desired_monitoring_service: - The monitoring service the cluster should use to write - metrics. 
Currently available options: - - “monitoring.googleapis.com/kubernetes” - the Google Cloud - Monitoring service with Kubernetes-native resource model - - “monitoring.googleapis.com” - the Google Cloud Monitoring - service - “none” - no metrics will be exported from the - cluster - desired_addons_config: - Configurations for the various addons available to run in the - cluster. - desired_node_pool_id: - The node pool to be upgraded. This field is mandatory if - “desired_node_version”, “desired_image_family”, - “desired_node_pool_autoscaling”, or - “desired_workload_metadata_config” is specified and there is - more than one node pool on the cluster. - desired_image_type: - The desired image type for the node pool. NOTE: Set the - “desired_node_pool” field as well. - desired_node_pool_autoscaling: - Autoscaler configuration for the node pool specified in - desired_node_pool_id. If there is only one pool in the cluster - and desired_node_pool_id is not provided then the change - applies to that single node pool. - desired_locations: - The desired list of Google Compute Engine `zones - `__ in - which the cluster’s nodes should be located. Changing the - locations a cluster is in will result in nodes being either - created or removed from the cluster, depending on whether - locations are being added or removed. This list must always - include the cluster’s primary zone. - desired_master_authorized_networks_config: - The desired configuration options for master authorized - networks feature. - desired_pod_security_policy_config: - The desired configuration options for the PodSecurityPolicy - feature. - desired_cluster_autoscaling: - Cluster-level autoscaling configuration. - desired_binary_authorization: - The desired configuration options for the Binary Authorization - feature. - desired_logging_service: - The logging service the cluster should use to write metrics. 
- Currently available options: - - “logging.googleapis.com/kubernetes” - the Google Cloud Logging - service with Kubernetes-native resource model - - “logging.googleapis.com” - the Google Cloud Logging service - - “none” - no logs will be exported from the cluster - desired_resource_usage_export_config: - The desired configuration for exporting resource usage. - desired_vertical_pod_autoscaling: - Cluster-level Vertical Pod Autoscaling configuration. - desired_intra_node_visibility_config: - The desired config of Intra-node visibility. - desired_master_version: - The Kubernetes version to change the master to. The only valid - value is the latest supported version. Users may specify - either explicit versions offered by Kubernetes Engine or - version aliases, which have the following behavior: - - “latest”: picks the highest valid Kubernetes version - “1.X”: - picks the highest valid patch+gke.N patch in the 1.X version - - “1.X.Y”: picks the highest valid gke.N patch in the 1.X.Y - version - “1.X.Y-gke.N”: picks an explicit Kubernetes version - - “-”: picks the default Kubernetes version - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ClusterUpdate) - }, -) -_sym_db.RegisterMessage(ClusterUpdate) - -Operation = _reflection.GeneratedProtocolMessageType( - "Operation", - (_message.Message,), - { - "DESCRIPTOR": _OPERATION, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """This operation resource represents operations that may have happened - or are happening on the cluster. All fields are output only. - - Attributes: - name: - The server-assigned ID for the operation. - zone: - The name of the Google Compute Engine `zone - `__ in - which the operation is taking place. This field is deprecated, - use location instead. - operation_type: - The operation type. - status: - The current status of the operation. - detail: - Detailed operation progress, if available. 
- status_message: - If an error has occurred, a textual description of the error. - self_link: - Server-defined URL for the resource. - target_link: - Server-defined URL for the target of the operation. - location: - [Output only] The name of the Google Compute Engine `zone - `__ or `region - `__ in which the cluster resides. - start_time: - [Output only] The time the operation started, in `RFC3339 - `__ text format. - end_time: - [Output only] The time the operation completed, in `RFC3339 - `__ text format. - progress: - [Output only] Progress information for an operation. - cluster_conditions: - Which conditions caused the current cluster state. - nodepool_conditions: - Which conditions caused the current node pool state. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.Operation) - }, -) -_sym_db.RegisterMessage(Operation) - -OperationProgress = _reflection.GeneratedProtocolMessageType( - "OperationProgress", - (_message.Message,), - { - "Metric": _reflection.GeneratedProtocolMessageType( - "Metric", - (_message.Message,), - { - "DESCRIPTOR": _OPERATIONPROGRESS_METRIC, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Progress metric is (string, int|float|string) pair. - - Attributes: - name: - Metric name, required. e.g., “nodes total”, “percent done” - value: - Strictly one of the values is required. - int_value: - For metrics with integer value. - double_value: - For metrics with floating point value. - string_value: - For metrics with custom values (ratios, visual progress, - etc.). - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.OperationProgress.Metric) - }, - ), - "DESCRIPTOR": _OPERATIONPROGRESS, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Information about operation (or operation stage) progress. - - Attributes: - name: - A non-parameterized string describing an operation stage. - Unset for single-stage operations. 
- status: - Status of an operation stage. Unset for single-stage - operations. - metrics: - Progress metric bundle, for example: metrics: [{name: “nodes - done”, int_value: 15}, {name: “nodes total”, int_value: 32}] - or metrics: [{name: “progress”, double_value: 0.56}, {name: - “progress scale”, double_value: 1.0}] - stages: - Substages of an operation or a stage. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.OperationProgress) - }, -) -_sym_db.RegisterMessage(OperationProgress) -_sym_db.RegisterMessage(OperationProgress.Metric) - -CreateClusterRequest = _reflection.GeneratedProtocolMessageType( - "CreateClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATECLUSTERREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """CreateClusterRequest creates a cluster. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the parent field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the parent field. - cluster: - Required. A `cluster resource - `__ - parent: - The parent (project and location) where the cluster will be - created. Specified in the format ``projects/*/locations/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.CreateClusterRequest) - }, -) -_sym_db.RegisterMessage(CreateClusterRequest) - -GetClusterRequest = _reflection.GeneratedProtocolMessageType( - "GetClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETCLUSTERREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """GetClusterRequest gets the settings of a cluster. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. 
This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to retrieve. - This field has been deprecated and replaced by the name field. - name: - The name (project, location, cluster) of the cluster to - retrieve. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.GetClusterRequest) - }, -) -_sym_db.RegisterMessage(GetClusterRequest) - -UpdateClusterRequest = _reflection.GeneratedProtocolMessageType( - "UpdateClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATECLUSTERREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """UpdateClusterRequest updates the settings of a cluster. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to upgrade. This - field has been deprecated and replaced by the name field. - update: - Required. A description of the update. - name: - The name (project, location, cluster) of the cluster to - update. Specified in the format - ``projects/*/locations/*/clusters/*``. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.UpdateClusterRequest) - }, -) -_sym_db.RegisterMessage(UpdateClusterRequest) - -UpdateNodePoolRequest = _reflection.GeneratedProtocolMessageType( - "UpdateNodePoolRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATENODEPOOLREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """SetNodePoolVersionRequest updates the version of a node pool. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to upgrade. This - field has been deprecated and replaced by the name field. - node_pool_id: - Required. Deprecated. The name of the node pool to upgrade. - This field has been deprecated and replaced by the name field. - node_version: - Required. The Kubernetes version to change the nodes to - (typically an upgrade). Users may specify either explicit - versions offered by Kubernetes Engine or version aliases, - which have the following behavior: - “latest”: picks the - highest valid Kubernetes version - “1.X”: picks the highest - valid patch+gke.N patch in the 1.X version - “1.X.Y”: picks - the highest valid gke.N patch in the 1.X.Y version - - “1.X.Y-gke.N”: picks an explicit Kubernetes version - “-”: - picks the Kubernetes master version - image_type: - Required. The desired image type for the node pool. - workload_metadata_config: - The desired image type for the node pool. - name: - The name (project, location, cluster, node pool) of the node - pool to update. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.UpdateNodePoolRequest) - }, -) -_sym_db.RegisterMessage(UpdateNodePoolRequest) - -SetNodePoolAutoscalingRequest = _reflection.GeneratedProtocolMessageType( - "SetNodePoolAutoscalingRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETNODEPOOLAUTOSCALINGREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """SetNodePoolAutoscalingRequest sets the autoscaler settings of a node - pool. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to upgrade. This - field has been deprecated and replaced by the name field. - node_pool_id: - Required. Deprecated. The name of the node pool to upgrade. - This field has been deprecated and replaced by the name field. - autoscaling: - Required. Autoscaling configuration for the node pool. - name: - The name (project, location, cluster, node pool) of the node - pool to set autoscaler settings. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.SetNodePoolAutoscalingRequest) - }, -) -_sym_db.RegisterMessage(SetNodePoolAutoscalingRequest) - -SetLoggingServiceRequest = _reflection.GeneratedProtocolMessageType( - "SetLoggingServiceRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETLOGGINGSERVICEREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """SetLoggingServiceRequest sets the logging service of a cluster. - - Attributes: - project_id: - Required. Deprecated. 
The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to upgrade. This - field has been deprecated and replaced by the name field. - logging_service: - Required. The logging service the cluster should use to write - metrics. Currently available options: - - “logging.googleapis.com” - the Google Cloud Logging service - - “none” - no metrics will be exported from the cluster - name: - The name (project, location, cluster) of the cluster to set - logging. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.SetLoggingServiceRequest) - }, -) -_sym_db.RegisterMessage(SetLoggingServiceRequest) - -SetMonitoringServiceRequest = _reflection.GeneratedProtocolMessageType( - "SetMonitoringServiceRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETMONITORINGSERVICEREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """SetMonitoringServiceRequest sets the monitoring service of a cluster. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to upgrade. This - field has been deprecated and replaced by the name field. - monitoring_service: - Required. The monitoring service the cluster should use to - write metrics. 
Currently available options: - - “monitoring.googleapis.com” - the Google Cloud Monitoring - service - “none” - no metrics will be exported from the - cluster - name: - The name (project, location, cluster) of the cluster to set - monitoring. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.SetMonitoringServiceRequest) - }, -) -_sym_db.RegisterMessage(SetMonitoringServiceRequest) - -SetAddonsConfigRequest = _reflection.GeneratedProtocolMessageType( - "SetAddonsConfigRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETADDONSCONFIGREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """SetAddonsRequest sets the addons associated with the cluster. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to upgrade. This - field has been deprecated and replaced by the name field. - addons_config: - Required. The desired configurations for the various addons - available to run in the cluster. - name: - The name (project, location, cluster) of the cluster to set - addons. Specified in the format - ``projects/*/locations/*/clusters/*``. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.SetAddonsConfigRequest) - }, -) -_sym_db.RegisterMessage(SetAddonsConfigRequest) - -SetLocationsRequest = _reflection.GeneratedProtocolMessageType( - "SetLocationsRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETLOCATIONSREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """SetLocationsRequest sets the locations of the cluster. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to upgrade. This - field has been deprecated and replaced by the name field. - locations: - Required. The desired list of Google Compute Engine `zones - `__ in - which the cluster’s nodes should be located. Changing the - locations a cluster is in will result in nodes being either - created or removed from the cluster, depending on whether - locations are being added or removed. This list must always - include the cluster’s primary zone. - name: - The name (project, location, cluster) of the cluster to set - locations. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.SetLocationsRequest) - }, -) -_sym_db.RegisterMessage(SetLocationsRequest) - -UpdateMasterRequest = _reflection.GeneratedProtocolMessageType( - "UpdateMasterRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEMASTERREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """UpdateMasterRequest updates the master of the cluster. - - Attributes: - project_id: - Required. Deprecated. 
The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to upgrade. This - field has been deprecated and replaced by the name field. - master_version: - Required. The Kubernetes version to change the master to. - Users may specify either explicit versions offered by - Kubernetes Engine or version aliases, which have the following - behavior: - “latest”: picks the highest valid Kubernetes - version - “1.X”: picks the highest valid patch+gke.N patch in - the 1.X version - “1.X.Y”: picks the highest valid gke.N - patch in the 1.X.Y version - “1.X.Y-gke.N”: picks an explicit - Kubernetes version - “-”: picks the default Kubernetes - version - name: - The name (project, location, cluster) of the cluster to - update. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.UpdateMasterRequest) - }, -) -_sym_db.RegisterMessage(UpdateMasterRequest) - -SetMasterAuthRequest = _reflection.GeneratedProtocolMessageType( - "SetMasterAuthRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETMASTERAUTHREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """SetMasterAuthRequest updates the admin password of a cluster. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. 
The name of the cluster to upgrade. This - field has been deprecated and replaced by the name field. - action: - Required. The exact form of action to be taken on the master - auth. - update: - Required. A description of the update. - name: - The name (project, location, cluster) of the cluster to set - auth. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.SetMasterAuthRequest) - }, -) -_sym_db.RegisterMessage(SetMasterAuthRequest) - -DeleteClusterRequest = _reflection.GeneratedProtocolMessageType( - "DeleteClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETECLUSTERREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """DeleteClusterRequest deletes a cluster. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to delete. This - field has been deprecated and replaced by the name field. - name: - The name (project, location, cluster) of the cluster to - delete. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.DeleteClusterRequest) - }, -) -_sym_db.RegisterMessage(DeleteClusterRequest) - -ListClustersRequest = _reflection.GeneratedProtocolMessageType( - "ListClustersRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """ListClustersRequest lists clusters. - - Attributes: - project_id: - Required. Deprecated. 
The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the parent field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides, or “-” for all zones. This field - has been deprecated and replaced by the parent field. - parent: - The parent (project and location) where the clusters will be - listed. Specified in the format ``projects/*/locations/*``. - Location “-” matches all zones and all regions. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ListClustersRequest) - }, -) -_sym_db.RegisterMessage(ListClustersRequest) - -ListClustersResponse = _reflection.GeneratedProtocolMessageType( - "ListClustersResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSRESPONSE, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """ListClustersResponse is the result of ListClustersRequest. - - Attributes: - clusters: - A list of clusters in the project in the specified zone, or - across all ones. - missing_zones: - If any zones are listed here, the list of clusters returned - may be missing those zones. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ListClustersResponse) - }, -) -_sym_db.RegisterMessage(ListClustersResponse) - -GetOperationRequest = _reflection.GeneratedProtocolMessageType( - "GetOperationRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETOPERATIONREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """GetOperationRequest gets a single operation. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. 
This field has been deprecated and - replaced by the name field. - operation_id: - Required. Deprecated. The server-assigned ``name`` of the - operation. This field has been deprecated and replaced by the - name field. - name: - The name (project, location, operation id) of the operation to - get. Specified in the format - ``projects/*/locations/*/operations/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.GetOperationRequest) - }, -) -_sym_db.RegisterMessage(GetOperationRequest) - -ListOperationsRequest = _reflection.GeneratedProtocolMessageType( - "ListOperationsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTOPERATIONSREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """ListOperationsRequest lists operations. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the parent field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ to - return operations for, or ``-`` for all zones. This field has - been deprecated and replaced by the parent field. - parent: - The parent (project and location) where the operations will be - listed. Specified in the format ``projects/*/locations/*``. - Location “-” matches all zones and all regions. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ListOperationsRequest) - }, -) -_sym_db.RegisterMessage(ListOperationsRequest) - -CancelOperationRequest = _reflection.GeneratedProtocolMessageType( - "CancelOperationRequest", - (_message.Message,), - { - "DESCRIPTOR": _CANCELOPERATIONREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """CancelOperationRequest cancels a single operation. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. 
This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the operation resides. This field has been deprecated - and replaced by the name field. - operation_id: - Required. Deprecated. The server-assigned ``name`` of the - operation. This field has been deprecated and replaced by the - name field. - name: - The name (project, location, operation id) of the operation to - cancel. Specified in the format - ``projects/*/locations/*/operations/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.CancelOperationRequest) - }, -) -_sym_db.RegisterMessage(CancelOperationRequest) - -ListOperationsResponse = _reflection.GeneratedProtocolMessageType( - "ListOperationsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTOPERATIONSRESPONSE, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """ListOperationsResponse is the result of ListOperationsRequest. - - Attributes: - operations: - A list of operations in the project in the specified zone. - missing_zones: - If any zones are listed here, the list of operations returned - may be missing the operations from those zones. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ListOperationsResponse) - }, -) -_sym_db.RegisterMessage(ListOperationsResponse) - -GetServerConfigRequest = _reflection.GeneratedProtocolMessageType( - "GetServerConfigRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETSERVERCONFIGREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Gets the current Kubernetes Engine service configuration. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. 
The name of the Google Compute Engine - `zone - `__ to - return operations for. This field has been deprecated and - replaced by the name field. - name: - The name (project and location) of the server config to get, - specified in the format ``projects/*/locations/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.GetServerConfigRequest) - }, -) -_sym_db.RegisterMessage(GetServerConfigRequest) - -ServerConfig = _reflection.GeneratedProtocolMessageType( - "ServerConfig", - (_message.Message,), - { - "DESCRIPTOR": _SERVERCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Kubernetes Engine service configuration. - - Attributes: - default_cluster_version: - Version of Kubernetes the service deploys by default. - valid_node_versions: - List of valid node upgrade target versions. - default_image_type: - Default image type. - valid_image_types: - List of valid image types. - valid_master_versions: - List of valid master versions. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ServerConfig) - }, -) -_sym_db.RegisterMessage(ServerConfig) - -CreateNodePoolRequest = _reflection.GeneratedProtocolMessageType( - "CreateNodePoolRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATENODEPOOLREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """CreateNodePoolRequest creates a node pool for a cluster. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number `__. This field has been deprecated and - replaced by the parent field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the parent field. - cluster_id: - Required. Deprecated. The name of the cluster. This field has - been deprecated and replaced by the parent field. - node_pool: - Required. 
The node pool to create. - parent: - The parent (project, location, cluster id) where the node pool - will be created. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.CreateNodePoolRequest) - }, -) -_sym_db.RegisterMessage(CreateNodePoolRequest) - -DeleteNodePoolRequest = _reflection.GeneratedProtocolMessageType( - "DeleteNodePoolRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETENODEPOOLREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """DeleteNodePoolRequest deletes a node pool for a cluster. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number `__. This field has been deprecated and - replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster. This field has - been deprecated and replaced by the name field. - node_pool_id: - Required. Deprecated. The name of the node pool to delete. - This field has been deprecated and replaced by the name field. - name: - The name (project, location, cluster, node pool id) of the - node pool to delete. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.DeleteNodePoolRequest) - }, -) -_sym_db.RegisterMessage(DeleteNodePoolRequest) - -ListNodePoolsRequest = _reflection.GeneratedProtocolMessageType( - "ListNodePoolsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTNODEPOOLSREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """ListNodePoolsRequest lists the node pool(s) for a cluster. - - Attributes: - project_id: - Required. Deprecated. 
The Google Developers Console `project - ID or project number `__. This field has been deprecated and - replaced by the parent field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the parent field. - cluster_id: - Required. Deprecated. The name of the cluster. This field has - been deprecated and replaced by the parent field. - parent: - The parent (project, location, cluster id) where the node - pools will be listed. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ListNodePoolsRequest) - }, -) -_sym_db.RegisterMessage(ListNodePoolsRequest) - -GetNodePoolRequest = _reflection.GeneratedProtocolMessageType( - "GetNodePoolRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETNODEPOOLREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """GetNodePoolRequest retrieves a node pool for a cluster. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number `__. This field has been deprecated and - replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster. This field has - been deprecated and replaced by the name field. - node_pool_id: - Required. Deprecated. The name of the node pool. This field - has been deprecated and replaced by the name field. - name: - The name (project, location, cluster, node pool id) of the - node pool to get. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.GetNodePoolRequest) - }, -) -_sym_db.RegisterMessage(GetNodePoolRequest) - -NodePool = _reflection.GeneratedProtocolMessageType( - "NodePool", - (_message.Message,), - { - "DESCRIPTOR": _NODEPOOL, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """NodePool contains the name and configuration for a cluster’s node - pool. Node pools are a set of nodes (i.e. VM’s), with a common - configuration and specification, under the control of the cluster - master. They may have a set of Kubernetes labels applied to them, - which may be used to reference them during pod scheduling. They may - also be resized up or down, to accommodate the workload. - - Attributes: - name: - The name of the node pool. - config: - The node configuration of the pool. - initial_node_count: - The initial node count for the pool. You must ensure that your - Compute Engine `resource quota - `__ is sufficient for - this number of instances. You must also have available - firewall and routes quota. - self_link: - [Output only] Server-defined URL for the resource. - version: - The version of the Kubernetes of this node. - instance_group_urls: - [Output only] The resource URLs of the `managed instance - groups `__ associated - with this node pool. - status: - [Output only] The status of the nodes in this pool instance. - status_message: - [Output only] Additional information about the current status - of this node pool instance, if available. - autoscaling: - Autoscaler configuration for this NodePool. Autoscaler is - enabled only if a valid configuration is present. - management: - NodeManagement configuration for this NodePool. - max_pods_constraint: - The constraint on the maximum number of pods that can be run - simultaneously on a node in the node pool. - conditions: - Which conditions caused the current node pool state. 
- pod_ipv4_cidr_size: - [Output only] The pod CIDR block size per node in this node - pool. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.NodePool) - }, -) -_sym_db.RegisterMessage(NodePool) - -NodeManagement = _reflection.GeneratedProtocolMessageType( - "NodeManagement", - (_message.Message,), - { - "DESCRIPTOR": _NODEMANAGEMENT, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """NodeManagement defines the set of node management services turned on - for the node pool. - - Attributes: - auto_upgrade: - Whether the nodes will be automatically upgraded. - auto_repair: - Whether the nodes will be automatically repaired. - upgrade_options: - Specifies the Auto Upgrade knobs for the node pool. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.NodeManagement) - }, -) -_sym_db.RegisterMessage(NodeManagement) - -AutoUpgradeOptions = _reflection.GeneratedProtocolMessageType( - "AutoUpgradeOptions", - (_message.Message,), - { - "DESCRIPTOR": _AUTOUPGRADEOPTIONS, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """AutoUpgradeOptions defines the set of options for the user to control - how the Auto Upgrades will proceed. - - Attributes: - auto_upgrade_start_time: - [Output only] This field is set when upgrades are about to - commence with the approximate start time for the upgrades, in - `RFC3339 `__ text - format. - description: - [Output only] This field is set when upgrades are about to - commence with the description of the upgrade. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.AutoUpgradeOptions) - }, -) -_sym_db.RegisterMessage(AutoUpgradeOptions) - -MaintenancePolicy = _reflection.GeneratedProtocolMessageType( - "MaintenancePolicy", - (_message.Message,), - { - "DESCRIPTOR": _MAINTENANCEPOLICY, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """MaintenancePolicy defines the maintenance policy to be used for the - cluster. - - Attributes: - window: - Specifies the maintenance window in which maintenance may be - performed. - resource_version: - A hash identifying the version of this policy, so that updates - to fields of the policy won’t accidentally undo intermediate - changes (and so that users of the API unaware of some fields - won’t accidentally remove other fields). Make a get() request - to the cluster to get the current resource version and include - it with requests to set the policy. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.MaintenancePolicy) - }, -) -_sym_db.RegisterMessage(MaintenancePolicy) - -MaintenanceWindow = _reflection.GeneratedProtocolMessageType( - "MaintenanceWindow", - (_message.Message,), - { - "MaintenanceExclusionsEntry": _reflection.GeneratedProtocolMessageType( - "MaintenanceExclusionsEntry", - (_message.Message,), - { - "DESCRIPTOR": _MAINTENANCEWINDOW_MAINTENANCEEXCLUSIONSENTRY, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2" - # @@protoc_insertion_point(class_scope:google.container.v1beta1.MaintenanceWindow.MaintenanceExclusionsEntry) - }, - ), - "DESCRIPTOR": _MAINTENANCEWINDOW, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """MaintenanceWindow defines the maintenance window to be used for the - cluster. - - Attributes: - policy: - Unimplemented, reserved for future use. 
- HourlyMaintenanceWindow hourly_maintenance_window = 1; - daily_maintenance_window: - DailyMaintenanceWindow specifies a daily maintenance operation - window. - recurring_window: - RecurringWindow specifies some number of recurring time - periods for maintenance to occur. The time windows may be - overlapping. If no maintenance windows are set, maintenance - can occur at any time. - maintenance_exclusions: - Exceptions to maintenance window. Non-emergency maintenance - should not occur in these windows. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.MaintenanceWindow) - }, -) -_sym_db.RegisterMessage(MaintenanceWindow) -_sym_db.RegisterMessage(MaintenanceWindow.MaintenanceExclusionsEntry) - -TimeWindow = _reflection.GeneratedProtocolMessageType( - "TimeWindow", - (_message.Message,), - { - "DESCRIPTOR": _TIMEWINDOW, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Represents an arbitrary window of time. - - Attributes: - start_time: - The time that the window first starts. - end_time: - The time that the window ends. The end time should take place - after the start time. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.TimeWindow) - }, -) -_sym_db.RegisterMessage(TimeWindow) - -RecurringTimeWindow = _reflection.GeneratedProtocolMessageType( - "RecurringTimeWindow", - (_message.Message,), - { - "DESCRIPTOR": _RECURRINGTIMEWINDOW, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Represents an arbitrary window of time that recurs. - - Attributes: - window: - The window of the first recurrence. - recurrence: - An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) - for how this window reccurs. They go on for the span of time - between the start and end time. 
For example, to have - something repeat every weekday, you’d use: - FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR To repeat some window daily - (equivalent to the DailyMaintenanceWindow): FREQ=DAILY For the - first weekend of every month: - FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU This specifies how - frequently the window starts. Eg, if you wanted to have a 9-5 - UTC-4 window every weekday, you’d use something like: start - time = 2019-01-01T09:00:00-0400 end time = - 2019-01-01T17:00:00-0400 recurrence = - FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR Windows can span multiple - days. Eg, to make the window encompass every weekend from - midnight Saturday till the last minute of Sunday UTC: start - time = 2019-01-05T00:00:00Z end time = 2019-01-07T23:59:00Z - recurrence = FREQ=WEEKLY;BYDAY=SA Note the start and end - time’s specific dates are largely arbitrary except to specify - duration of the window and when it first starts. The FREQ - values of HOURLY, MINUTELY, and SECONDLY are not supported. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.RecurringTimeWindow) - }, -) -_sym_db.RegisterMessage(RecurringTimeWindow) - -DailyMaintenanceWindow = _reflection.GeneratedProtocolMessageType( - "DailyMaintenanceWindow", - (_message.Message,), - { - "DESCRIPTOR": _DAILYMAINTENANCEWINDOW, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Time window specified for daily maintenance operations. - - Attributes: - start_time: - Time within the maintenance window to start the maintenance - operations. It must be in format “HH:MM”, where HH : [00-23] - and MM : [00-59] GMT. - duration: - [Output only] Duration of the time window, automatically - chosen to be smallest possible in the given scenario. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.DailyMaintenanceWindow) - }, -) -_sym_db.RegisterMessage(DailyMaintenanceWindow) - -SetNodePoolManagementRequest = _reflection.GeneratedProtocolMessageType( - "SetNodePoolManagementRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETNODEPOOLMANAGEMENTREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """SetNodePoolManagementRequest sets the node management properties of a - node pool. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to update. This - field has been deprecated and replaced by the name field. - node_pool_id: - Required. Deprecated. The name of the node pool to update. - This field has been deprecated and replaced by the name field. - management: - Required. NodeManagement configuration for the node pool. - name: - The name (project, location, cluster, node pool id) of the - node pool to set management properties. Specified in the - format ``projects/*/locations/*/clusters/*/nodePools/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.SetNodePoolManagementRequest) - }, -) -_sym_db.RegisterMessage(SetNodePoolManagementRequest) - -SetNodePoolSizeRequest = _reflection.GeneratedProtocolMessageType( - "SetNodePoolSizeRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETNODEPOOLSIZEREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """SetNodePoolSizeRequest sets the size a node pool. - - Attributes: - project_id: - Required. Deprecated. 
The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to update. This - field has been deprecated and replaced by the name field. - node_pool_id: - Required. Deprecated. The name of the node pool to update. - This field has been deprecated and replaced by the name field. - node_count: - Required. The desired node count for the pool. - name: - The name (project, location, cluster, node pool id) of the - node pool to set size. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.SetNodePoolSizeRequest) - }, -) -_sym_db.RegisterMessage(SetNodePoolSizeRequest) - -RollbackNodePoolUpgradeRequest = _reflection.GeneratedProtocolMessageType( - "RollbackNodePoolUpgradeRequest", - (_message.Message,), - { - "DESCRIPTOR": _ROLLBACKNODEPOOLUPGRADEREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or - Failed NodePool upgrade. This will be an no-op if the last upgrade - successfully completed. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to rollback. - This field has been deprecated and replaced by the name field. - node_pool_id: - Required. Deprecated. 
The name of the node pool to rollback. - This field has been deprecated and replaced by the name field. - name: - The name (project, location, cluster, node pool id) of the - node poll to rollback upgrade. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.RollbackNodePoolUpgradeRequest) - }, -) -_sym_db.RegisterMessage(RollbackNodePoolUpgradeRequest) - -ListNodePoolsResponse = _reflection.GeneratedProtocolMessageType( - "ListNodePoolsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTNODEPOOLSRESPONSE, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """ListNodePoolsResponse is the result of ListNodePoolsRequest. - - Attributes: - node_pools: - A list of node pools for a cluster. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ListNodePoolsResponse) - }, -) -_sym_db.RegisterMessage(ListNodePoolsResponse) - -ClusterAutoscaling = _reflection.GeneratedProtocolMessageType( - "ClusterAutoscaling", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERAUTOSCALING, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """ClusterAutoscaling contains global, per-cluster information required - by Cluster Autoscaler to automatically adjust the size of the cluster - and create/delete node pools based on the current needs. - - Attributes: - enable_node_autoprovisioning: - Enables automatic node pool creation and deletion. - resource_limits: - Contains global constraints regarding minimum and maximum - amount of resources in the cluster. - autoprovisioning_node_pool_defaults: - AutoprovisioningNodePoolDefaults contains defaults for a node - pool created by NAP. - autoprovisioning_locations: - The list of Google Compute Engine `zones - `__ in - which the NodePool’s nodes can be created by NAP. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ClusterAutoscaling) - }, -) -_sym_db.RegisterMessage(ClusterAutoscaling) - -AutoprovisioningNodePoolDefaults = _reflection.GeneratedProtocolMessageType( - "AutoprovisioningNodePoolDefaults", - (_message.Message,), - { - "DESCRIPTOR": _AUTOPROVISIONINGNODEPOOLDEFAULTS, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """AutoprovisioningNodePoolDefaults contains defaults for a node pool - created by NAP. - - Attributes: - oauth_scopes: - Scopes that are used by NAP when creating node pools. If - oauth_scopes are specified, service_account should be empty. - service_account: - The Google Cloud Platform Service Account to be used by the - node VMs. If service_account is specified, scopes should be - empty. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.AutoprovisioningNodePoolDefaults) - }, -) -_sym_db.RegisterMessage(AutoprovisioningNodePoolDefaults) - -ResourceLimit = _reflection.GeneratedProtocolMessageType( - "ResourceLimit", - (_message.Message,), - { - "DESCRIPTOR": _RESOURCELIMIT, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Contains information about amount of some resource in the cluster. For - memory, value should be in GB. - - Attributes: - resource_type: - Resource name “cpu”, “memory” or gpu-specific string. - minimum: - Minimum amount of the resource in the cluster. - maximum: - Maximum amount of the resource in the cluster. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ResourceLimit) - }, -) -_sym_db.RegisterMessage(ResourceLimit) - -NodePoolAutoscaling = _reflection.GeneratedProtocolMessageType( - "NodePoolAutoscaling", - (_message.Message,), - { - "DESCRIPTOR": _NODEPOOLAUTOSCALING, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """NodePoolAutoscaling contains information required by cluster - autoscaler to adjust the size of the node pool to the current cluster - usage. - - Attributes: - enabled: - Is autoscaling enabled for this node pool. - min_node_count: - Minimum number of nodes in the NodePool. Must be >= 1 and <= - max_node_count. - max_node_count: - Maximum number of nodes in the NodePool. Must be >= - min_node_count. There has to enough quota to scale up the - cluster. - autoprovisioned: - Can this node pool be deleted automatically. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.NodePoolAutoscaling) - }, -) -_sym_db.RegisterMessage(NodePoolAutoscaling) - -SetLabelsRequest = _reflection.GeneratedProtocolMessageType( - "SetLabelsRequest", - (_message.Message,), - { - "ResourceLabelsEntry": _reflection.GeneratedProtocolMessageType( - "ResourceLabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _SETLABELSREQUEST_RESOURCELABELSENTRY, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2" - # @@protoc_insertion_point(class_scope:google.container.v1beta1.SetLabelsRequest.ResourceLabelsEntry) - }, - ), - "DESCRIPTOR": _SETLABELSREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """SetLabelsRequest sets the Google Cloud Platform labels on a Google - Container Engine cluster, which will in turn set them for Google - Compute Engine resources used by that cluster - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number `__. 
This field has been deprecated and - replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster. This field has - been deprecated and replaced by the name field. - resource_labels: - Required. The labels to set for that cluster. - label_fingerprint: - Required. The fingerprint of the previous set of labels for - this resource, used to detect conflicts. The fingerprint is - initially generated by Kubernetes Engine and changes after - every request to modify or update labels. You must always - provide an up-to-date fingerprint hash when updating or - changing labels. Make a get() request to the resource to get - the latest fingerprint. - name: - The name (project, location, cluster id) of the cluster to set - labels. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.SetLabelsRequest) - }, -) -_sym_db.RegisterMessage(SetLabelsRequest) -_sym_db.RegisterMessage(SetLabelsRequest.ResourceLabelsEntry) - -SetLegacyAbacRequest = _reflection.GeneratedProtocolMessageType( - "SetLegacyAbacRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETLEGACYABACREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """SetLegacyAbacRequest enables or disables the ABAC authorization - mechanism for a cluster. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number - `__. This - field has been deprecated and replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster to update. 
This - field has been deprecated and replaced by the name field. - enabled: - Required. Whether ABAC authorization will be enabled in the - cluster. - name: - The name (project, location, cluster id) of the cluster to set - legacy abac. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.SetLegacyAbacRequest) - }, -) -_sym_db.RegisterMessage(SetLegacyAbacRequest) - -StartIPRotationRequest = _reflection.GeneratedProtocolMessageType( - "StartIPRotationRequest", - (_message.Message,), - { - "DESCRIPTOR": _STARTIPROTATIONREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """StartIPRotationRequest creates a new IP for the cluster and then - performs a node upgrade on each node pool to point to the new IP. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number `__. This field has been deprecated and - replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster. This field has - been deprecated and replaced by the name field. - name: - The name (project, location, cluster id) of the cluster to - start IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - rotate_credentials: - Whether to rotate credentials during IP rotation. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.StartIPRotationRequest) - }, -) -_sym_db.RegisterMessage(StartIPRotationRequest) - -CompleteIPRotationRequest = _reflection.GeneratedProtocolMessageType( - "CompleteIPRotationRequest", - (_message.Message,), - { - "DESCRIPTOR": _COMPLETEIPROTATIONREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """CompleteIPRotationRequest moves the cluster master back into single-IP - mode. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number `__. This field has been deprecated and - replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster. This field has - been deprecated and replaced by the name field. - name: - The name (project, location, cluster id) of the cluster to - complete IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.CompleteIPRotationRequest) - }, -) -_sym_db.RegisterMessage(CompleteIPRotationRequest) - -AcceleratorConfig = _reflection.GeneratedProtocolMessageType( - "AcceleratorConfig", - (_message.Message,), - { - "DESCRIPTOR": _ACCELERATORCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """AcceleratorConfig represents a Hardware Accelerator request. - - Attributes: - accelerator_count: - The number of the accelerator cards exposed to an instance. - accelerator_type: - The accelerator type resource name. 
List of supported - accelerators `here - `__ - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.AcceleratorConfig) - }, -) -_sym_db.RegisterMessage(AcceleratorConfig) - -WorkloadMetadataConfig = _reflection.GeneratedProtocolMessageType( - "WorkloadMetadataConfig", - (_message.Message,), - { - "DESCRIPTOR": _WORKLOADMETADATACONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """WorkloadMetadataConfig defines the metadata configuration to expose to - workloads on the node pool. - - Attributes: - node_metadata: - NodeMetadata is the configuration for how to expose metadata - to the workloads running on the node. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.WorkloadMetadataConfig) - }, -) -_sym_db.RegisterMessage(WorkloadMetadataConfig) - -SetNetworkPolicyRequest = _reflection.GeneratedProtocolMessageType( - "SetNetworkPolicyRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETNETWORKPOLICYREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """SetNetworkPolicyRequest enables/disables network policy for a cluster. - - Attributes: - project_id: - Required. Deprecated. The Google Developers Console `project - ID or project number `__. This field has been deprecated and - replaced by the name field. - zone: - Required. Deprecated. The name of the Google Compute Engine - `zone - `__ in - which the cluster resides. This field has been deprecated and - replaced by the name field. - cluster_id: - Required. Deprecated. The name of the cluster. This field has - been deprecated and replaced by the name field. - network_policy: - Required. Configuration options for the NetworkPolicy feature. - name: - The name (project, location, cluster id) of the cluster to set - networking policy. Specified in the format - ``projects/*/locations/*/clusters/*``. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.SetNetworkPolicyRequest) - }, -) -_sym_db.RegisterMessage(SetNetworkPolicyRequest) - -SetMaintenancePolicyRequest = _reflection.GeneratedProtocolMessageType( - "SetMaintenancePolicyRequest", - (_message.Message,), - { - "DESCRIPTOR": _SETMAINTENANCEPOLICYREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """SetMaintenancePolicyRequest sets the maintenance policy for a cluster. - - Attributes: - project_id: - Required. The Google Developers Console `project ID or project - number `__. - zone: - Required. The name of the Google Compute Engine `zone - `__ in - which the cluster resides. - cluster_id: - Required. The name of the cluster to update. - maintenance_policy: - Required. The maintenance policy to be set for the cluster. An - empty field clears the existing maintenance policy. - name: - The name (project, location, cluster id) of the cluster to set - maintenance policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.SetMaintenancePolicyRequest) - }, -) -_sym_db.RegisterMessage(SetMaintenancePolicyRequest) - -ListLocationsRequest = _reflection.GeneratedProtocolMessageType( - "ListLocationsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTLOCATIONSREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """ListLocationsRequest is used to request the locations that offer GKE. - - Attributes: - parent: - Required. Contains the name of the resource requested. - Specified in the format ``projects/*``. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ListLocationsRequest) - }, -) -_sym_db.RegisterMessage(ListLocationsRequest) - -ListLocationsResponse = _reflection.GeneratedProtocolMessageType( - "ListLocationsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTLOCATIONSRESPONSE, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """ListLocationsResponse returns the list of all GKE locations and their - recommendation state. - - Attributes: - locations: - A full list of GKE locations. - next_page_token: - Only return ListLocationsResponse that occur after the - page_token. This value should be populated from the - ListLocationsResponse.next_page_token if that response token - was set (which happens when listing more Locations than fit in - a single ListLocationsResponse). - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ListLocationsResponse) - }, -) -_sym_db.RegisterMessage(ListLocationsResponse) - -Location = _reflection.GeneratedProtocolMessageType( - "Location", - (_message.Message,), - { - "DESCRIPTOR": _LOCATION, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Location returns the location name, and if the location is recommended - for GKE cluster scheduling. - - Attributes: - type: - Contains the type of location this Location is for. Regional - or Zonal. - name: - Contains the name of the resource requested. Specified in the - format ``projects/*/locations/*``. - recommended: - Whether the location is recomended for GKE cluster scheduling. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.Location) - }, -) -_sym_db.RegisterMessage(Location) - -StatusCondition = _reflection.GeneratedProtocolMessageType( - "StatusCondition", - (_message.Message,), - { - "DESCRIPTOR": _STATUSCONDITION, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """StatusCondition describes why a cluster or a node pool has a certain - status (e.g., ERROR or DEGRADED). - - Attributes: - code: - Machine-friendly representation of the condition - message: - Human-friendly representation of the condition - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.StatusCondition) - }, -) -_sym_db.RegisterMessage(StatusCondition) - -NetworkConfig = _reflection.GeneratedProtocolMessageType( - "NetworkConfig", - (_message.Message,), - { - "DESCRIPTOR": _NETWORKCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """NetworkConfig reports the relative names of network & subnetwork. - - Attributes: - network: - Output only. The relative name of the Google Compute Engine [n - etwork][google.container.v1beta1.NetworkConfig.network](https: - //cloud.google.com/compute/docs/networks-and- - firewalls#networks) to which the cluster is connected. - Example: projects/my-project/global/networks/my-network - subnetwork: - Output only. The relative name of the Google Compute Engine - `subnetwork `__ to - which the cluster is connected. Example: projects/my- - project/regions/us-central1/subnetworks/my-subnet - enable_intra_node_visibility: - Whether Intra-node visibility is enabled for this cluster. - This makes same node pod to pod traffic visible for VPC - network. 
- """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.NetworkConfig) - }, -) -_sym_db.RegisterMessage(NetworkConfig) - -ListUsableSubnetworksRequest = _reflection.GeneratedProtocolMessageType( - "ListUsableSubnetworksRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTUSABLESUBNETWORKSREQUEST, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """ListUsableSubnetworksRequest requests the list of usable subnetworks. - available to a user for creating clusters. - - Attributes: - parent: - Required. The parent project where subnetworks are usable. - Specified in the format ``projects/*``. - filter: - Filtering currently only supports equality on the - networkProjectId and must be in the form: - “networkProjectId=[PROJECTID]”, where ``networkProjectId`` is - the project which owns the listed subnetworks. This defaults - to the parent project ID. - page_size: - The max number of results per page that should be returned. If - the number of available results is larger than ``page_size``, - a ``next_page_token`` is returned which can be used to get the - next page of results in subsequent requests. Acceptable values - are 0 to 500, inclusive. (Default: 500) - page_token: - Specifies a page token to use. Set this to the nextPageToken - returned by previous list requests to get the next page of - results. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ListUsableSubnetworksRequest) - }, -) -_sym_db.RegisterMessage(ListUsableSubnetworksRequest) - -ListUsableSubnetworksResponse = _reflection.GeneratedProtocolMessageType( - "ListUsableSubnetworksResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTUSABLESUBNETWORKSRESPONSE, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """ListUsableSubnetworksResponse is the response of - ListUsableSubnetworksRequest. 
- - Attributes: - subnetworks: - A list of usable subnetworks in the specified network project. - next_page_token: - This token allows you to get the next page of results for list - requests. If the number of results is larger than - ``page_size``, use the ``next_page_token`` as a value for the - query parameter ``page_token`` in the next request. The value - will become empty when there are no more pages. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ListUsableSubnetworksResponse) - }, -) -_sym_db.RegisterMessage(ListUsableSubnetworksResponse) - -UsableSubnetworkSecondaryRange = _reflection.GeneratedProtocolMessageType( - "UsableSubnetworkSecondaryRange", - (_message.Message,), - { - "DESCRIPTOR": _USABLESUBNETWORKSECONDARYRANGE, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Secondary IP range of a usable subnetwork. - - Attributes: - range_name: - The name associated with this subnetwork secondary range, used - when adding an alias IP range to a VM instance. - ip_cidr_range: - The range of IP addresses belonging to this subnetwork - secondary range. - status: - This field is to determine the status of the secondary range - programmably. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.UsableSubnetworkSecondaryRange) - }, -) -_sym_db.RegisterMessage(UsableSubnetworkSecondaryRange) - -UsableSubnetwork = _reflection.GeneratedProtocolMessageType( - "UsableSubnetwork", - (_message.Message,), - { - "DESCRIPTOR": _USABLESUBNETWORK, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Network Name. Example: projects/my-project/global/networks/my-network - - Attributes: - subnetwork: - Subnetwork Name. Example: projects/my-project/regions/us- - central1/subnetworks/my-subnet - ip_cidr_range: - The range of internal addresses that are owned by this - subnetwork. - secondary_ip_ranges: - Secondary IP ranges. 
- status_message: - A human readable status message representing the reasons for - cases where the caller cannot use the secondary ranges under - the subnet. For example if the secondary_ip_ranges is empty - due to a permission issue, an insufficient permission message - will be given by status_message. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.UsableSubnetwork) - }, -) -_sym_db.RegisterMessage(UsableSubnetwork) - -VerticalPodAutoscaling = _reflection.GeneratedProtocolMessageType( - "VerticalPodAutoscaling", - (_message.Message,), - { - "DESCRIPTOR": _VERTICALPODAUTOSCALING, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """VerticalPodAutoscaling contains global, per-cluster information - required by Vertical Pod Autoscaler to automatically adjust the - resources of pods controlled by it. - - Attributes: - enabled: - Enables vertical pod autoscaling. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.VerticalPodAutoscaling) - }, -) -_sym_db.RegisterMessage(VerticalPodAutoscaling) - -IntraNodeVisibilityConfig = _reflection.GeneratedProtocolMessageType( - "IntraNodeVisibilityConfig", - (_message.Message,), - { - "DESCRIPTOR": _INTRANODEVISIBILITYCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """IntraNodeVisibilityConfig contains the desired config of the intra- - node visibility on this cluster. - - Attributes: - enabled: - Enables intra node visibility for this cluster. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.IntraNodeVisibilityConfig) - }, -) -_sym_db.RegisterMessage(IntraNodeVisibilityConfig) - -MaxPodsConstraint = _reflection.GeneratedProtocolMessageType( - "MaxPodsConstraint", - (_message.Message,), - { - "DESCRIPTOR": _MAXPODSCONSTRAINT, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Constraints applied to pods. 
- - Attributes: - max_pods_per_node: - Constraint enforced on the max num of pods per node. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.MaxPodsConstraint) - }, -) -_sym_db.RegisterMessage(MaxPodsConstraint) - -DatabaseEncryption = _reflection.GeneratedProtocolMessageType( - "DatabaseEncryption", - (_message.Message,), - { - "DESCRIPTOR": _DATABASEENCRYPTION, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration of etcd encryption. - - Attributes: - state: - Denotes the state of etcd encryption. - key_name: - Name of CloudKMS key to use for the encryption of secrets in - etcd. Ex. projects/my-project/locations/global/keyRings/my- - ring/cryptoKeys/my-key - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.DatabaseEncryption) - }, -) -_sym_db.RegisterMessage(DatabaseEncryption) - -ResourceUsageExportConfig = _reflection.GeneratedProtocolMessageType( - "ResourceUsageExportConfig", - (_message.Message,), - { - "BigQueryDestination": _reflection.GeneratedProtocolMessageType( - "BigQueryDestination", - (_message.Message,), - { - "DESCRIPTOR": _RESOURCEUSAGEEXPORTCONFIG_BIGQUERYDESTINATION, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Parameters for using BigQuery as the destination of resource usage - export. - - Attributes: - dataset_id: - The ID of a BigQuery Dataset. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ResourceUsageExportConfig.BigQueryDestination) - }, - ), - "ConsumptionMeteringConfig": _reflection.GeneratedProtocolMessageType( - "ConsumptionMeteringConfig", - (_message.Message,), - { - "DESCRIPTOR": _RESOURCEUSAGEEXPORTCONFIG_CONSUMPTIONMETERINGCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Parameters for controlling consumption metering. - - Attributes: - enabled: - Whether to enable consumption metering for this cluster. 
If - enabled, a second BigQuery table will be created to hold - resource consumption records. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ResourceUsageExportConfig.ConsumptionMeteringConfig) - }, - ), - "DESCRIPTOR": _RESOURCEUSAGEEXPORTCONFIG, - "__module__": "google.cloud.container_v1beta1.proto.cluster_service_pb2", - "__doc__": """Configuration for exporting cluster resource usages. - - Attributes: - bigquery_destination: - Configuration to use BigQuery as usage export destination. - enable_network_egress_metering: - Whether to enable network egress metering for this cluster. If - enabled, a daemonset will be created in the cluster to meter - network egress traffic. - consumption_metering_config: - Configuration to enable resource consumption metering. - """, - # @@protoc_insertion_point(class_scope:google.container.v1beta1.ResourceUsageExportConfig) - }, -) -_sym_db.RegisterMessage(ResourceUsageExportConfig) -_sym_db.RegisterMessage(ResourceUsageExportConfig.BigQueryDestination) -_sym_db.RegisterMessage(ResourceUsageExportConfig.ConsumptionMeteringConfig) - - -DESCRIPTOR._options = None -_NODECONFIG_METADATAENTRY._options = None -_NODECONFIG_LABELSENTRY._options = None -_ADDONSCONFIG.fields_by_name["kubernetes_dashboard"]._options = None -_IPALLOCATIONPOLICY.fields_by_name["cluster_ipv4_cidr"]._options = None -_IPALLOCATIONPOLICY.fields_by_name["node_ipv4_cidr"]._options = None -_IPALLOCATIONPOLICY.fields_by_name["services_ipv4_cidr"]._options = None -_CLUSTER_RESOURCELABELSENTRY._options = None -_CLUSTER.fields_by_name["initial_node_count"]._options = None -_CLUSTER.fields_by_name["node_config"]._options = None -_CLUSTER.fields_by_name["private_cluster"]._options = None -_CLUSTER.fields_by_name["master_ipv4_cidr_block"]._options = None -_CLUSTER.fields_by_name["zone"]._options = None -_CLUSTER.fields_by_name["current_node_version"]._options = None -_CLUSTER.fields_by_name["status_message"]._options = None 
-_CLUSTER.fields_by_name["instance_group_urls"]._options = None -_CLUSTER.fields_by_name["current_node_count"]._options = None -_OPERATION.fields_by_name["zone"]._options = None -_OPERATION.fields_by_name["status_message"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["project_id"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["zone"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["cluster"]._options = None -_GETCLUSTERREQUEST.fields_by_name["project_id"]._options = None -_GETCLUSTERREQUEST.fields_by_name["zone"]._options = None -_GETCLUSTERREQUEST.fields_by_name["cluster_id"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["project_id"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["zone"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["cluster_id"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["update"]._options = None -_UPDATENODEPOOLREQUEST.fields_by_name["project_id"]._options = None -_UPDATENODEPOOLREQUEST.fields_by_name["zone"]._options = None -_UPDATENODEPOOLREQUEST.fields_by_name["cluster_id"]._options = None -_UPDATENODEPOOLREQUEST.fields_by_name["node_pool_id"]._options = None -_UPDATENODEPOOLREQUEST.fields_by_name["node_version"]._options = None -_UPDATENODEPOOLREQUEST.fields_by_name["image_type"]._options = None -_SETNODEPOOLAUTOSCALINGREQUEST.fields_by_name["project_id"]._options = None -_SETNODEPOOLAUTOSCALINGREQUEST.fields_by_name["zone"]._options = None -_SETNODEPOOLAUTOSCALINGREQUEST.fields_by_name["cluster_id"]._options = None -_SETNODEPOOLAUTOSCALINGREQUEST.fields_by_name["node_pool_id"]._options = None -_SETNODEPOOLAUTOSCALINGREQUEST.fields_by_name["autoscaling"]._options = None -_SETLOGGINGSERVICEREQUEST.fields_by_name["project_id"]._options = None -_SETLOGGINGSERVICEREQUEST.fields_by_name["zone"]._options = None -_SETLOGGINGSERVICEREQUEST.fields_by_name["cluster_id"]._options = None -_SETLOGGINGSERVICEREQUEST.fields_by_name["logging_service"]._options = None 
-_SETMONITORINGSERVICEREQUEST.fields_by_name["project_id"]._options = None -_SETMONITORINGSERVICEREQUEST.fields_by_name["zone"]._options = None -_SETMONITORINGSERVICEREQUEST.fields_by_name["cluster_id"]._options = None -_SETMONITORINGSERVICEREQUEST.fields_by_name["monitoring_service"]._options = None -_SETADDONSCONFIGREQUEST.fields_by_name["project_id"]._options = None -_SETADDONSCONFIGREQUEST.fields_by_name["zone"]._options = None -_SETADDONSCONFIGREQUEST.fields_by_name["cluster_id"]._options = None -_SETADDONSCONFIGREQUEST.fields_by_name["addons_config"]._options = None -_SETLOCATIONSREQUEST.fields_by_name["project_id"]._options = None -_SETLOCATIONSREQUEST.fields_by_name["zone"]._options = None -_SETLOCATIONSREQUEST.fields_by_name["cluster_id"]._options = None -_SETLOCATIONSREQUEST.fields_by_name["locations"]._options = None -_UPDATEMASTERREQUEST.fields_by_name["project_id"]._options = None -_UPDATEMASTERREQUEST.fields_by_name["zone"]._options = None -_UPDATEMASTERREQUEST.fields_by_name["cluster_id"]._options = None -_UPDATEMASTERREQUEST.fields_by_name["master_version"]._options = None -_SETMASTERAUTHREQUEST.fields_by_name["project_id"]._options = None -_SETMASTERAUTHREQUEST.fields_by_name["zone"]._options = None -_SETMASTERAUTHREQUEST.fields_by_name["cluster_id"]._options = None -_SETMASTERAUTHREQUEST.fields_by_name["action"]._options = None -_SETMASTERAUTHREQUEST.fields_by_name["update"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["project_id"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["zone"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["cluster_id"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["project_id"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["zone"]._options = None -_GETOPERATIONREQUEST.fields_by_name["project_id"]._options = None -_GETOPERATIONREQUEST.fields_by_name["zone"]._options = None -_GETOPERATIONREQUEST.fields_by_name["operation_id"]._options = None 
-_LISTOPERATIONSREQUEST.fields_by_name["project_id"]._options = None -_LISTOPERATIONSREQUEST.fields_by_name["zone"]._options = None -_CANCELOPERATIONREQUEST.fields_by_name["project_id"]._options = None -_CANCELOPERATIONREQUEST.fields_by_name["zone"]._options = None -_CANCELOPERATIONREQUEST.fields_by_name["operation_id"]._options = None -_GETSERVERCONFIGREQUEST.fields_by_name["project_id"]._options = None -_GETSERVERCONFIGREQUEST.fields_by_name["zone"]._options = None -_CREATENODEPOOLREQUEST.fields_by_name["project_id"]._options = None -_CREATENODEPOOLREQUEST.fields_by_name["zone"]._options = None -_CREATENODEPOOLREQUEST.fields_by_name["cluster_id"]._options = None -_CREATENODEPOOLREQUEST.fields_by_name["node_pool"]._options = None -_DELETENODEPOOLREQUEST.fields_by_name["project_id"]._options = None -_DELETENODEPOOLREQUEST.fields_by_name["zone"]._options = None -_DELETENODEPOOLREQUEST.fields_by_name["cluster_id"]._options = None -_DELETENODEPOOLREQUEST.fields_by_name["node_pool_id"]._options = None -_LISTNODEPOOLSREQUEST.fields_by_name["project_id"]._options = None -_LISTNODEPOOLSREQUEST.fields_by_name["zone"]._options = None -_LISTNODEPOOLSREQUEST.fields_by_name["cluster_id"]._options = None -_GETNODEPOOLREQUEST.fields_by_name["project_id"]._options = None -_GETNODEPOOLREQUEST.fields_by_name["zone"]._options = None -_GETNODEPOOLREQUEST.fields_by_name["cluster_id"]._options = None -_GETNODEPOOLREQUEST.fields_by_name["node_pool_id"]._options = None -_NODEPOOL.fields_by_name["status_message"]._options = None -_MAINTENANCEWINDOW_MAINTENANCEEXCLUSIONSENTRY._options = None -_SETNODEPOOLMANAGEMENTREQUEST.fields_by_name["project_id"]._options = None -_SETNODEPOOLMANAGEMENTREQUEST.fields_by_name["zone"]._options = None -_SETNODEPOOLMANAGEMENTREQUEST.fields_by_name["cluster_id"]._options = None -_SETNODEPOOLMANAGEMENTREQUEST.fields_by_name["node_pool_id"]._options = None -_SETNODEPOOLMANAGEMENTREQUEST.fields_by_name["management"]._options = None 
-_SETNODEPOOLSIZEREQUEST.fields_by_name["project_id"]._options = None -_SETNODEPOOLSIZEREQUEST.fields_by_name["zone"]._options = None -_SETNODEPOOLSIZEREQUEST.fields_by_name["cluster_id"]._options = None -_SETNODEPOOLSIZEREQUEST.fields_by_name["node_pool_id"]._options = None -_SETNODEPOOLSIZEREQUEST.fields_by_name["node_count"]._options = None -_ROLLBACKNODEPOOLUPGRADEREQUEST.fields_by_name["project_id"]._options = None -_ROLLBACKNODEPOOLUPGRADEREQUEST.fields_by_name["zone"]._options = None -_ROLLBACKNODEPOOLUPGRADEREQUEST.fields_by_name["cluster_id"]._options = None -_ROLLBACKNODEPOOLUPGRADEREQUEST.fields_by_name["node_pool_id"]._options = None -_SETLABELSREQUEST_RESOURCELABELSENTRY._options = None -_SETLABELSREQUEST.fields_by_name["project_id"]._options = None -_SETLABELSREQUEST.fields_by_name["zone"]._options = None -_SETLABELSREQUEST.fields_by_name["cluster_id"]._options = None -_SETLABELSREQUEST.fields_by_name["resource_labels"]._options = None -_SETLABELSREQUEST.fields_by_name["label_fingerprint"]._options = None -_SETLEGACYABACREQUEST.fields_by_name["project_id"]._options = None -_SETLEGACYABACREQUEST.fields_by_name["zone"]._options = None -_SETLEGACYABACREQUEST.fields_by_name["cluster_id"]._options = None -_SETLEGACYABACREQUEST.fields_by_name["enabled"]._options = None -_STARTIPROTATIONREQUEST.fields_by_name["project_id"]._options = None -_STARTIPROTATIONREQUEST.fields_by_name["zone"]._options = None -_STARTIPROTATIONREQUEST.fields_by_name["cluster_id"]._options = None -_COMPLETEIPROTATIONREQUEST.fields_by_name["project_id"]._options = None -_COMPLETEIPROTATIONREQUEST.fields_by_name["zone"]._options = None -_COMPLETEIPROTATIONREQUEST.fields_by_name["cluster_id"]._options = None -_SETNETWORKPOLICYREQUEST.fields_by_name["project_id"]._options = None -_SETNETWORKPOLICYREQUEST.fields_by_name["zone"]._options = None -_SETNETWORKPOLICYREQUEST.fields_by_name["cluster_id"]._options = None -_SETNETWORKPOLICYREQUEST.fields_by_name["network_policy"]._options = None 
-_SETMAINTENANCEPOLICYREQUEST.fields_by_name["project_id"]._options = None -_SETMAINTENANCEPOLICYREQUEST.fields_by_name["zone"]._options = None -_SETMAINTENANCEPOLICYREQUEST.fields_by_name["cluster_id"]._options = None -_SETMAINTENANCEPOLICYREQUEST.fields_by_name["maintenance_policy"]._options = None -_LISTLOCATIONSREQUEST.fields_by_name["parent"]._options = None -_LISTUSABLESUBNETWORKSREQUEST.fields_by_name["parent"]._options = None - -_CLUSTERMANAGER = _descriptor.ServiceDescriptor( - name="ClusterManager", - full_name="google.container.v1beta1.ClusterManager", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\030container.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - create_key=_descriptor._internal_create_key, - serialized_start=18172, - serialized_end=27563, - methods=[ - _descriptor.MethodDescriptor( - name="ListClusters", - full_name="google.container.v1beta1.ClusterManager.ListClusters", - index=0, - containing_service=None, - input_type=_LISTCLUSTERSREQUEST, - output_type=_LISTCLUSTERSRESPONSE, - serialized_options=b"\202\323\344\223\002k\0221/v1beta1/{parent=projects/*/locations/*}/clustersZ6\0224/v1beta1/projects/{project_id}/zones/{zone}/clusters\332A\017project_id,zone", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetCluster", - full_name="google.container.v1beta1.ClusterManager.GetCluster", - index=1, - containing_service=None, - input_type=_GETCLUSTERREQUEST, - output_type=_CLUSTER, - serialized_options=b"\202\323\344\223\002x\0221/v1beta1/{name=projects/*/locations/*/clusters/*}ZC\022A/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}\332A\032project_id,zone,cluster_id", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateCluster", - full_name="google.container.v1beta1.ClusterManager.CreateCluster", - index=2, - containing_service=None, - input_type=_CREATECLUSTERREQUEST, - output_type=_OPERATION, - 
serialized_options=b'\202\323\344\223\002q"1/v1beta1/{parent=projects/*/locations/*}/clusters:\001*Z9"4/v1beta1/projects/{project_id}/zones/{zone}/clusters:\001*\332A\027project_id,zone,cluster', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateCluster", - full_name="google.container.v1beta1.ClusterManager.UpdateCluster", - index=3, - containing_service=None, - input_type=_UPDATECLUSTERREQUEST, - output_type=_OPERATION, - serialized_options=b"\202\323\344\223\002~\0321/v1beta1/{name=projects/*/locations/*/clusters/*}:\001*ZF\032A/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:\001*\332A!project_id,zone,cluster_id,update", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateNodePool", - full_name="google.container.v1beta1.ClusterManager.UpdateNodePool", - index=4, - containing_service=None, - input_type=_UPDATENODEPOOLREQUEST, - output_type=_OPERATION, - serialized_options=b'\202\323\344\223\002\252\001\032=/v1beta1/{name=projects/*/locations/*/clusters/*/nodePools/*}:\001*Zf"a/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/update:\001*', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SetNodePoolAutoscaling", - full_name="google.container.v1beta1.ClusterManager.SetNodePoolAutoscaling", - index=5, - containing_service=None, - input_type=_SETNODEPOOLAUTOSCALINGREQUEST, - output_type=_OPERATION, - serialized_options=b'\202\323\344\223\002\276\001"L/v1beta1/{name=projects/*/locations/*/clusters/*/nodePools/*}:setAutoscaling:\001*Zk"f/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/autoscaling:\001*', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SetLoggingService", - full_name="google.container.v1beta1.ClusterManager.SetLoggingService", - index=6, - containing_service=None, - 
input_type=_SETLOGGINGSERVICEREQUEST, - output_type=_OPERATION, - serialized_options=b'\202\323\344\223\002\221\001"/v1beta1/{name=projects/*/locations/*/clusters/*}:setLocations:\001*ZP"K/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/locations:\001*\332A$project_id,zone,cluster_id,locations', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateMaster", - full_name="google.container.v1beta1.ClusterManager.UpdateMaster", - index=10, - containing_service=None, - input_type=_UPDATEMASTERREQUEST, - output_type=_OPERATION, - serialized_options=b'\202\323\344\223\002\222\001">/v1beta1/{name=projects/*/locations/*/clusters/*}:updateMaster:\001*ZM"H/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/master:\001*\332A)project_id,zone,cluster_id,master_version', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SetMasterAuth", - full_name="google.container.v1beta1.ClusterManager.SetMasterAuth", - index=11, - containing_service=None, - input_type=_SETMASTERAUTHREQUEST, - output_type=_OPERATION, - serialized_options=b'\202\323\344\223\002\232\001"?/v1beta1/{name=projects/*/locations/*/clusters/*}:setMasterAuth:\001*ZT"O/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:setMasterAuth:\001*', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteCluster", - full_name="google.container.v1beta1.ClusterManager.DeleteCluster", - index=12, - containing_service=None, - input_type=_DELETECLUSTERREQUEST, - output_type=_OPERATION, - serialized_options=b"\202\323\344\223\002x*1/v1beta1/{name=projects/*/locations/*/clusters/*}ZC*A/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}\332A\032project_id,zone,cluster_id", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListOperations", - full_name="google.container.v1beta1.ClusterManager.ListOperations", - 
index=13, - containing_service=None, - input_type=_LISTOPERATIONSREQUEST, - output_type=_LISTOPERATIONSRESPONSE, - serialized_options=b"\202\323\344\223\002o\0223/v1beta1/{parent=projects/*/locations/*}/operationsZ8\0226/v1beta1/projects/{project_id}/zones/{zone}/operations\332A\017project_id,zone", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetOperation", - full_name="google.container.v1beta1.ClusterManager.GetOperation", - index=14, - containing_service=None, - input_type=_GETOPERATIONREQUEST, - output_type=_OPERATION, - serialized_options=b"\202\323\344\223\002~\0223/v1beta1/{name=projects/*/locations/*/operations/*}ZG\022E/v1beta1/projects/{project_id}/zones/{zone}/operations/{operation_id}\332A\034project_id,zone,operation_id", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CancelOperation", - full_name="google.container.v1beta1.ClusterManager.CancelOperation", - index=15, - containing_service=None, - input_type=_CANCELOPERATIONREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b'\202\323\344\223\002\222\001":/v1beta1/{name=projects/*/locations/*/operations/*}:cancel:\001*ZQ"L/v1beta1/projects/{project_id}/zones/{zone}/operations/{operation_id}:cancel:\001*\332A\034project_id,zone,operation_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetServerConfig", - full_name="google.container.v1beta1.ClusterManager.GetServerConfig", - index=16, - containing_service=None, - input_type=_GETSERVERCONFIGREQUEST, - output_type=_SERVERCONFIG, - serialized_options=b"\202\323\344\223\002q\0223/v1beta1/{name=projects/*/locations/*}/serverConfigZ:\0228/v1beta1/projects/{project_id}/zones/{zone}/serverconfig\332A\017project_id,zone", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListNodePools", - 
full_name="google.container.v1beta1.ClusterManager.ListNodePools", - index=17, - containing_service=None, - input_type=_LISTNODEPOOLSREQUEST, - output_type=_LISTNODEPOOLSRESPONSE, - serialized_options=b"\202\323\344\223\002\216\001\022=/v1beta1/{parent=projects/*/locations/*/clusters/*}/nodePoolsZM\022K/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools\332A\032project_id,zone,cluster_id", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetNodePool", - full_name="google.container.v1beta1.ClusterManager.GetNodePool", - index=18, - containing_service=None, - input_type=_GETNODEPOOLREQUEST, - output_type=_NODEPOOL, - serialized_options=b"\202\323\344\223\002\235\001\022=/v1beta1/{name=projects/*/locations/*/clusters/*/nodePools/*}Z\\\022Z/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}\332A'project_id,zone,cluster_id,node_pool_id", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateNodePool", - full_name="google.container.v1beta1.ClusterManager.CreateNodePool", - index=19, - containing_service=None, - input_type=_CREATENODEPOOLREQUEST, - output_type=_OPERATION, - serialized_options=b'\202\323\344\223\002\224\001"=/v1beta1/{parent=projects/*/locations/*/clusters/*}/nodePools:\001*ZP"K/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools:\001*\332A$project_id,zone,cluster_id,node_pool', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteNodePool", - full_name="google.container.v1beta1.ClusterManager.DeleteNodePool", - index=20, - containing_service=None, - input_type=_DELETENODEPOOLREQUEST, - output_type=_OPERATION, - 
serialized_options=b"\202\323\344\223\002\235\001*=/v1beta1/{name=projects/*/locations/*/clusters/*/nodePools/*}Z\\*Z/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}\332A'project_id,zone,cluster_id,node_pool_id", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="RollbackNodePoolUpgrade", - full_name="google.container.v1beta1.ClusterManager.RollbackNodePoolUpgrade", - index=21, - containing_service=None, - input_type=_ROLLBACKNODEPOOLUPGRADEREQUEST, - output_type=_OPERATION, - serialized_options=b'\202\323\344\223\002\265\001"F/v1beta1/{name=projects/*/locations/*/clusters/*/nodePools/*}:rollback:\001*Zh"c/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}:rollback:\001*\332A\'project_id,zone,cluster_id,node_pool_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SetNodePoolManagement", - full_name="google.container.v1beta1.ClusterManager.SetNodePoolManagement", - index=22, - containing_service=None, - input_type=_SETNODEPOOLMANAGEMENTREQUEST, - output_type=_OPERATION, - serialized_options=b'\202\323\344\223\002\277\001"K/v1beta1/{name=projects/*/locations/*/clusters/*/nodePools/*}:setManagement:\001*Zm"h/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/setManagement:\001*\332A2project_id,zone,cluster_id,node_pool_id,management', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SetLabels", - full_name="google.container.v1beta1.ClusterManager.SetLabels", - index=23, - containing_service=None, - input_type=_SETLABELSREQUEST, - output_type=_OPERATION, - serialized_options=b'\202\323\344\223\002\237\001"C/v1beta1/{name=projects/*/locations/*/clusters/*}:setResourceLabels:\001*ZU"P/v1beta1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/resourceLabels:\001*\332A None: + """Instantiate the cluster manager client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ClusterManagerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = ClusterManagerClient( + credentials=credentials, transport=transport, client_options=client_options, + ) + + async def list_clusters( + self, + request: cluster_service.ListClustersRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListClustersResponse: + r"""Lists all clusters owned by a project in either the + specified zone or all zones. + + Args: + request (:class:`~.cluster_service.ListClustersRequest`): + The request object. 
ListClustersRequest lists clusters. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides, or "-" for all zones. This + field has been deprecated and replaced by the parent + field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ListClustersResponse: + ListClustersResponse is the result of + ListClustersRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_clusters, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_cluster( + self, + request: cluster_service.GetClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Cluster: + r"""Gets the details for a specific cluster. + + Args: + request (:class:`~.cluster_service.GetClusterRequest`): + The request object. GetClusterRequest gets the settings + of a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to retrieve. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Cluster: + A Google Kubernetes Engine cluster. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def create_cluster( + self, + request: cluster_service.CreateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster: cluster_service.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. + + Args: + request (:class:`~.cluster_service.CreateClusterRequest`): + The request object. CreateClusterRequest creates a + cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`~.cluster_service.Cluster`): + Required. A `cluster + resource `__ + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def update_cluster( + self, + request: cluster_service.UpdateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + update: cluster_service.ClusterUpdate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the settings for a specific cluster. + + Args: + request (:class:`~.cluster_service.UpdateClusterRequest`): + The request object. UpdateClusterRequest updates the + settings of a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update (:class:`~.cluster_service.ClusterUpdate`): + Required. A description of the + update. + This corresponds to the ``update`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, update]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if update is not None: + request.update = update + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_node_pool( + self, + request: cluster_service.UpdateNodePoolRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the version and/or image type of a specific + node pool. 
+ + Args: + request (:class:`~.cluster_service.UpdateNodePoolRequest`): + The request object. SetNodePoolVersionRequest updates + the version of a node pool. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.UpdateNodePoolRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_node_pool_autoscaling( + self, + request: cluster_service.SetNodePoolAutoscalingRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the autoscaling settings of a specific node + pool. + + Args: + request (:class:`~.cluster_service.SetNodePoolAutoscalingRequest`): + The request object. SetNodePoolAutoscalingRequest sets + the autoscaler settings of a node pool. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.SetNodePoolAutoscalingRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_node_pool_autoscaling, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_logging_service( + self, + request: cluster_service.SetLoggingServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + logging_service: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the logging service for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetLoggingServiceRequest`): + The request object. SetLoggingServiceRequest sets the + logging service of a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. 
The name of the Google Compute
+ Engine
+ `zone <https://cloud.google.com/compute/docs/zones#available>`__
+ in which the cluster resides. This field has been
+ deprecated and replaced by the name field.
+ This corresponds to the ``zone`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ cluster_id (:class:`str`):
+ Required. Deprecated. The name of the
+ cluster to upgrade. This field has been
+ deprecated and replaced by the name
+ field.
+ This corresponds to the ``cluster_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ logging_service (:class:`str`):
+ Required. The logging service the cluster should use to
+ write metrics. Currently available options:
+
+ - "logging.googleapis.com" - the Google Cloud Logging
+ service
+ - "none" - no metrics will be exported from the cluster
+ This corresponds to the ``logging_service`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.cluster_service.Operation:
+ This operation resource represents
+ operations that may have happened or are
+ happening on the cluster. All fields are
+ output only.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ if request is not None and any([project_id, zone, cluster_id, logging_service]):
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = cluster_service.SetLoggingServiceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if logging_service is not None: + request.logging_service = logging_service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_logging_service, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_monitoring_service( + self, + request: cluster_service.SetMonitoringServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + monitoring_service: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the monitoring service for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetMonitoringServiceRequest`): + The request object. SetMonitoringServiceRequest sets the + monitoring service of a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + monitoring_service (:class:`str`): + Required. The monitoring service the cluster should use + to write metrics. Currently available options: + + - "monitoring.googleapis.com" - the Google Cloud + Monitoring service + - "none" - no metrics will be exported from the cluster + This corresponds to the ``monitoring_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, monitoring_service] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetMonitoringServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if monitoring_service is not None: + request.monitoring_service = monitoring_service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_monitoring_service, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_addons_config( + self, + request: cluster_service.SetAddonsConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + addons_config: cluster_service.AddonsConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the addons for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetAddonsConfigRequest`): + The request object. SetAddonsRequest sets the addons + associated with the cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + addons_config (:class:`~.cluster_service.AddonsConfig`): + Required. The desired configurations + for the various addons available to run + in the cluster. + This corresponds to the ``addons_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, addons_config]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetAddonsConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if addons_config is not None: + request.addons_config = addons_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_addons_config, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_locations( + self, + request: cluster_service.SetLocationsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + locations: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the locations for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetLocationsRequest`): + The request object. SetLocationsRequest sets the + locations of the cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + locations (:class:`Sequence[str]`): + Required. The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. Changing + the locations a cluster is in will result in nodes being + either created or removed from the cluster, depending on + whether locations are being added or removed. + + This list must always include the cluster's primary + zone. + This corresponds to the ``locations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, locations]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetLocationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if locations is not None: + request.locations = locations + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_locations, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_master( + self, + request: cluster_service.UpdateMasterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + master_version: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the master for a specific cluster. + + Args: + request (:class:`~.cluster_service.UpdateMasterRequest`): + The request object. UpdateMasterRequest updates the + master of the cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + master_version (:class:`str`): + Required. The Kubernetes version to + change the master to. + Users may specify either explicit + versions offered by Kubernetes Engine or + version aliases, which have the + following behavior: + - "latest": picks the highest valid + Kubernetes version - "1.X": picks the + highest valid patch+gke.N patch in the + 1.X version - "1.X.Y": picks the highest + valid gke.N patch in the 1.X.Y version - + "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the + default Kubernetes version + This corresponds to the ``master_version`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, master_version]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = cluster_service.UpdateMasterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if master_version is not None: + request.master_version = master_version + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_master, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_master_auth( + self, + request: cluster_service.SetMasterAuthRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets master auth materials. Currently supports + changing the admin password or a specific cluster, + either via password generation or explicitly setting the + password. + + Args: + request (:class:`~.cluster_service.SetMasterAuthRequest`): + The request object. SetMasterAuthRequest updates the + admin password of a cluster. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. 
All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.SetMasterAuthRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_master_auth, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_cluster( + self, + request: cluster_service.DeleteClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes the cluster, including the Kubernetes + endpoint and all worker nodes. + + Firewalls and routes that were configured during cluster + creation are also deleted. + + Other Google Compute Engine resources that might be in + use by the cluster, such as load balancer resources, are + not deleted if they weren't present when the cluster was + initially created. + + Args: + request (:class:`~.cluster_service.DeleteClusterRequest`): + The request object. DeleteClusterRequest deletes a + cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. 
The name of the Google Compute
+ Engine
+ `zone <https://cloud.google.com/compute/docs/zones#available>`__
+ in which the cluster resides. This field has been
+ deprecated and replaced by the name field.
+ This corresponds to the ``zone`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ cluster_id (:class:`str`):
+ Required. Deprecated. The name of the
+ cluster to delete. This field has been
+ deprecated and replaced by the name
+ field.
+ This corresponds to the ``cluster_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.cluster_service.Operation:
+ This operation resource represents
+ operations that may have happened or are
+ happening on the cluster. All fields are
+ output only.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ if request is not None and any([project_id, zone, cluster_id]):
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = cluster_service.DeleteClusterRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if project_id is not None:
+ request.project_id = project_id
+ if zone is not None:
+ request.zone = zone
+ if cluster_id is not None:
+ request.cluster_id = cluster_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_operations( + self, + request: cluster_service.ListOperationsRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListOperationsResponse: + r"""Lists all operations in a project in the specified + zone or all zones. + + Args: + request (:class:`~.cluster_service.ListOperationsRequest`): + The request object. ListOperationsRequest lists + operations. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + to return operations for, or ``-`` for all zones. This + field has been deprecated and replaced by the parent + field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.cluster_service.ListOperationsResponse: + ListOperationsResponse is the result + of ListOperationsRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListOperationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: cluster_service.GetOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Gets the specified operation. + + Args: + request (:class:`~.cluster_service.GetOperationRequest`): + The request object. GetOperationRequest gets a single + operation. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. 
+ This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (:class:`str`): + Required. Deprecated. The server-assigned ``name`` of + the operation. This field has been deprecated and + replaced by the name field. + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, operation_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.GetOperationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def cancel_operation( + self, + request: cluster_service.CancelOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels the specified operation. + + Args: + request (:class:`~.cluster_service.CancelOperationRequest`): + The request object. CancelOperationRequest cancels a + single operation. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the operation resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (:class:`str`): + Required. Deprecated. 
The server-assigned ``name`` of + the operation. This field has been deprecated and + replaced by the name field. + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, operation_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CancelOperationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def get_server_config( + self, + request: cluster_service.GetServerConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ServerConfig: + r"""Returns configuration info about the Google + Kubernetes Engine service. + + Args: + request (:class:`~.cluster_service.GetServerConfigRequest`): + The request object. Gets the current Kubernetes Engine + service configuration. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + to return operations for. This field has been deprecated + and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ServerConfig: + Kubernetes Engine service + configuration. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = cluster_service.GetServerConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_server_config, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_node_pools( + self, + request: cluster_service.ListNodePoolsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListNodePoolsResponse: + r"""Lists the node pools for a cluster. + + Args: + request (:class:`~.cluster_service.ListNodePoolsRequest`): + The request object. ListNodePoolsRequest lists the node + pool(s) for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the parent field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ListNodePoolsResponse: + ListNodePoolsResponse is the result + of ListNodePoolsRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListNodePoolsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_node_pools, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_node_pool( + self, + request: cluster_service.GetNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.NodePool: + r"""Retrieves the requested node pool. + + Args: + request (:class:`~.cluster_service.GetNodePoolRequest`): + The request object. GetNodePoolRequest retrieves a node + pool for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Required. Deprecated. The name of the + node pool. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.NodePool: + NodePool contains the name and + configuration for a cluster's node pool. + Node pools are a set of nodes (i.e. + VM's), with a common configuration and + specification, under the control of the + cluster master. They may have a set of + Kubernetes labels applied to them, which + may be used to reference them during pod + scheduling. They may also be resized up + or down, to accommodate the workload. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, node_pool_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.GetNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def create_node_pool( + self, + request: cluster_service.CreateNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool: cluster_service.NodePool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a node pool for a cluster. + + Args: + request (:class:`~.cluster_service.CreateNodePoolRequest`): + The request object. CreateNodePoolRequest creates a node + pool for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the parent field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool (:class:`~.cluster_service.NodePool`): + Required. The node pool to create. + This corresponds to the ``node_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, node_pool]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CreateNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool is not None: + request.node_pool = node_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_node_pool( + self, + request: cluster_service.DeleteNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes a node pool from a cluster. 
+ + Args: + request (:class:`~.cluster_service.DeleteNodePoolRequest`): + The request object. DeleteNodePoolRequest deletes a node + pool for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Required. Deprecated. The name of the + node pool to delete. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([project_id, zone, cluster_id, node_pool_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.DeleteNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def rollback_node_pool_upgrade( + self, + request: cluster_service.RollbackNodePoolUpgradeRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Rolls back a previously Aborted or Failed NodePool + upgrade. This makes no changes if the last upgrade + successfully completed. + + Args: + request (:class:`~.cluster_service.RollbackNodePoolUpgradeRequest`): + The request object. RollbackNodePoolUpgradeRequest + rollbacks the previously Aborted or Failed NodePool + upgrade. This will be an no-op if the last upgrade + successfully completed. 
+ project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to rollback. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Required. Deprecated. The name of the + node pool to rollback. This field has + been deprecated and replaced by the name + field. + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([project_id, zone, cluster_id, node_pool_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.RollbackNodePoolUpgradeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.rollback_node_pool_upgrade, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_node_pool_management( + self, + request: cluster_service.SetNodePoolManagementRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + management: cluster_service.NodeManagement = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the NodeManagement options for a node pool. + + Args: + request (:class:`~.cluster_service.SetNodePoolManagementRequest`): + The request object. SetNodePoolManagementRequest sets + the node management properties of a node pool. + project_id (:class:`str`): + Required. Deprecated. 
The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to update. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Required. Deprecated. The name of the + node pool to update. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + management (:class:`~.cluster_service.NodeManagement`): + Required. NodeManagement + configuration for the node pool. + This corresponds to the ``management`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, node_pool_id, management] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetNodePoolManagementRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if management is not None: + request.management = management + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_node_pool_management, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_labels( + self, + request: cluster_service.SetLabelsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + resource_labels: Sequence[ + cluster_service.SetLabelsRequest.ResourceLabelsEntry + ] = None, + label_fingerprint: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets labels on a cluster. + + Args: + request (:class:`~.cluster_service.SetLabelsRequest`): + The request object. 
SetLabelsRequest sets the Google + Cloud Platform labels on a Google Container Engine + cluster, which will in turn set them for Google Compute + Engine resources used by that cluster + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_labels (:class:`Sequence[~.cluster_service.SetLabelsRequest.ResourceLabelsEntry]`): + Required. The labels to set for that + cluster. + This corresponds to the ``resource_labels`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + label_fingerprint (:class:`str`): + Required. The fingerprint of the + previous set of labels for this + resource, used to detect conflicts. The + fingerprint is initially generated by + Kubernetes Engine and changes after + every request to modify or update + labels. You must always provide an up- + to-date fingerprint hash when updating + or changing labels. Make a + get() request to the + resource to get the latest fingerprint. + This corresponds to the ``label_fingerprint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, resource_labels, label_fingerprint] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetLabelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if resource_labels is not None: + request.resource_labels = resource_labels + if label_fingerprint is not None: + request.label_fingerprint = label_fingerprint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_labels, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def set_legacy_abac( + self, + request: cluster_service.SetLegacyAbacRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + enabled: bool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables the ABAC authorization mechanism + on a cluster. + + Args: + request (:class:`~.cluster_service.SetLegacyAbacRequest`): + The request object. SetLegacyAbacRequest enables or + disables the ABAC authorization mechanism for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to update. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + enabled (:class:`bool`): + Required. Whether ABAC authorization + will be enabled in the cluster. + This corresponds to the ``enabled`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, enabled]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetLegacyAbacRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if enabled is not None: + request.enabled = enabled + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_legacy_abac, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def start_ip_rotation( + self, + request: cluster_service.StartIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Starts master IP rotation. + + Args: + request (:class:`~.cluster_service.StartIPRotationRequest`): + The request object. StartIPRotationRequest creates a new + IP for the cluster and then performs a node upgrade on + each node pool to point to the new IP. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.StartIPRotationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.start_ip_rotation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def complete_ip_rotation( + self, + request: cluster_service.CompleteIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Completes master IP rotation. + + Args: + request (:class:`~.cluster_service.CompleteIPRotationRequest`): + The request object. CompleteIPRotationRequest moves the + cluster master back into single-IP mode. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. 
+ This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CompleteIPRotationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.complete_ip_rotation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_node_pool_size( + self, + request: cluster_service.SetNodePoolSizeRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the size for a specific node pool. + + Args: + request (:class:`~.cluster_service.SetNodePoolSizeRequest`): + The request object. SetNodePoolSizeRequest sets the size + a node pool. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.SetNodePoolSizeRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_node_pool_size, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_network_policy( + self, + request: cluster_service.SetNetworkPolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + network_policy: cluster_service.NetworkPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables Network Policy for a cluster. + + Args: + request (:class:`~.cluster_service.SetNetworkPolicyRequest`): + The request object. SetNetworkPolicyRequest + enables/disables network policy for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. 
+ This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_policy (:class:`~.cluster_service.NetworkPolicy`): + Required. Configuration options for + the NetworkPolicy feature. + This corresponds to the ``network_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, network_policy]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetNetworkPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if network_policy is not None: + request.network_policy = network_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_network_policy, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_maintenance_policy( + self, + request: cluster_service.SetMaintenancePolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + maintenance_policy: cluster_service.MaintenancePolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the maintenance policy for a cluster. + + Args: + request (:class:`~.cluster_service.SetMaintenancePolicyRequest`): + The request object. SetMaintenancePolicyRequest sets the + maintenance policy for a cluster. + project_id (:class:`str`): + Required. The Google Developers Console `project ID or + project + number `__. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. The name of the cluster to + update. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + maintenance_policy (:class:`~.cluster_service.MaintenancePolicy`): + Required. The maintenance policy to + be set for the cluster. An empty field + clears the existing maintenance policy. + This corresponds to the ``maintenance_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, maintenance_policy] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetMaintenancePolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if maintenance_policy is not None: + request.maintenance_policy = maintenance_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_maintenance_policy, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def list_usable_subnetworks( + self, + request: cluster_service.ListUsableSubnetworksRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListUsableSubnetworksAsyncPager: + r"""Lists subnetworks that can be used for creating + clusters in a project. + + Args: + request (:class:`~.cluster_service.ListUsableSubnetworksRequest`): + The request object. ListUsableSubnetworksRequest + requests the list of usable subnetworks. available to a + user for creating clusters. + parent (:class:`str`): + Required. The parent project where subnetworks are + usable. Specified in the format ``projects/*``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListUsableSubnetworksAsyncPager: + ListUsableSubnetworksResponse is the + response of + ListUsableSubnetworksRequest. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListUsableSubnetworksRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_usable_subnetworks, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListUsableSubnetworksAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_locations( + self, + request: cluster_service.ListLocationsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListLocationsResponse: + r"""Fetches locations that offer Google Kubernetes + Engine. + + Args: + request (:class:`~.cluster_service.ListLocationsRequest`): + The request object. ListLocationsRequest is used to + request the locations that offer GKE. + parent (:class:`str`): + Required. Contains the name of the resource requested. + Specified in the format ``projects/*``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.cluster_service.ListLocationsResponse: + ListLocationsResponse returns the + list of all GKE locations and their + recommendation state. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListLocationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+        return response
+
+
+try:
+    _client_info = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution("google-cloud-container",).version,
+    )
+except pkg_resources.DistributionNotFound:
+    _client_info = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("ClusterManagerAsyncClient",)
diff --git a/google/cloud/container_v1beta1/services/cluster_manager/client.py b/google/cloud/container_v1beta1/services/cluster_manager/client.py
new file mode 100644
index 00000000..58dffe37
--- /dev/null
+++ b/google/cloud/container_v1beta1/services/cluster_manager/client.py
@@ -0,0 +1,3348 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +from collections import OrderedDict +import os +import re +from typing import Callable, Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.container_v1beta1.services.cluster_manager import pagers +from google.cloud.container_v1beta1.types import cluster_service + +from .transports.base import ClusterManagerTransport +from .transports.grpc import ClusterManagerGrpcTransport +from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport + + +class ClusterManagerClientMeta(type): + """Metaclass for the ClusterManager client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[ClusterManagerTransport]] + _transport_registry["grpc"] = ClusterManagerGrpcTransport + _transport_registry["grpc_asyncio"] = ClusterManagerGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[ClusterManagerTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+        return next(iter(cls._transport_registry.values()))
+
+
+class ClusterManagerClient(metaclass=ClusterManagerClientMeta):
+    """Google Kubernetes Engine Cluster Manager v1beta1"""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "container.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            {@api.name}: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, ClusterManagerTransport] = None, + client_options: ClientOptions = None, + ) -> None: + """Instantiate the cluster manager client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ClusterManagerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + if isinstance(client_options, dict): + client_options = ClientOptions.from_dict(client_options) + if client_options is None: + client_options = ClientOptions.ClientOptions() + + if client_options.api_endpoint is None: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + if use_mtls_env == "never": + client_options.api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + has_client_cert_source = ( + client_options.client_cert_source is not None + or mtls.has_default_client_cert_source() + ) + client_options.api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT + if has_client_cert_source + else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ClusterManagerTransport): + # transport is a ClusterManagerTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=client_options.api_endpoint, + scopes=client_options.scopes, + api_mtls_endpoint=client_options.api_endpoint, + client_cert_source=client_options.client_cert_source, + ) + + def list_clusters( + self, + request: cluster_service.ListClustersRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListClustersResponse: + r"""Lists all clusters owned by a project in either the + specified zone or all zones. + + Args: + request (:class:`~.cluster_service.ListClustersRequest`): + The request object. ListClustersRequest lists clusters. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides, or "-" for all zones. This + field has been deprecated and replaced by the parent + field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ListClustersResponse: + ListClustersResponse is the result of + ListClustersRequest. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_clusters, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_cluster( + self, + request: cluster_service.GetClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Cluster: + r"""Gets the details for a specific cluster. + + Args: + request (:class:`~.cluster_service.GetClusterRequest`): + The request object. GetClusterRequest gets the settings + of a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. 
+ This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to retrieve. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Cluster: + A Google Kubernetes Engine cluster. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.get_cluster, default_timeout=None, client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_cluster( + self, + request: cluster_service.CreateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster: cluster_service.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. + + Args: + request (:class:`~.cluster_service.CreateClusterRequest`): + The request object. CreateClusterRequest creates a + cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`~.cluster_service.Cluster`): + Required. A `cluster + resource `__ + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.create_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_cluster( + self, + request: cluster_service.UpdateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + update: cluster_service.ClusterUpdate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the settings for a specific cluster. + + Args: + request (:class:`~.cluster_service.UpdateClusterRequest`): + The request object. UpdateClusterRequest updates the + settings of a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update (:class:`~.cluster_service.ClusterUpdate`): + Required. A description of the + update. + This corresponds to the ``update`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, update]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if update is not None: + request.update = update + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.update_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_node_pool( + self, + request: cluster_service.UpdateNodePoolRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the version and/or image type of a specific + node pool. 
+ + Args: + request (:class:`~.cluster_service.UpdateNodePoolRequest`): + The request object. SetNodePoolVersionRequest updates + the version of a node pool. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.UpdateNodePoolRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.update_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_node_pool_autoscaling( + self, + request: cluster_service.SetNodePoolAutoscalingRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the autoscaling settings of a specific node + pool. + + Args: + request (:class:`~.cluster_service.SetNodePoolAutoscalingRequest`): + The request object. SetNodePoolAutoscalingRequest sets + the autoscaler settings of a node pool. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.SetNodePoolAutoscalingRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_node_pool_autoscaling, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_logging_service( + self, + request: cluster_service.SetLoggingServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + logging_service: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the logging service for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetLoggingServiceRequest`): + The request object. SetLoggingServiceRequest sets the + logging service of a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. 
The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logging_service (:class:`str`): + Required. The logging service the cluster should use to + write metrics. Currently available options: + + - "logging.googleapis.com" - the Google Cloud Logging + service + - "none" - no metrics will be exported from the cluster + This corresponds to the ``logging_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, logging_service]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetLoggingServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if logging_service is not None: + request.logging_service = logging_service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_logging_service, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_monitoring_service( + self, + request: cluster_service.SetMonitoringServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + monitoring_service: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the monitoring service for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetMonitoringServiceRequest`): + The request object. SetMonitoringServiceRequest sets the + monitoring service of a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + monitoring_service (:class:`str`): + Required. The monitoring service the cluster should use + to write metrics. Currently available options: + + - "monitoring.googleapis.com" - the Google Cloud + Monitoring service + - "none" - no metrics will be exported from the cluster + This corresponds to the ``monitoring_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, monitoring_service] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetMonitoringServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if monitoring_service is not None: + request.monitoring_service = monitoring_service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_monitoring_service, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_addons_config( + self, + request: cluster_service.SetAddonsConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + addons_config: cluster_service.AddonsConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the addons for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetAddonsConfigRequest`): + The request object. SetAddonsRequest sets the addons + associated with the cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + addons_config (:class:`~.cluster_service.AddonsConfig`): + Required. The desired configurations + for the various addons available to run + in the cluster. + This corresponds to the ``addons_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, addons_config]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetAddonsConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if addons_config is not None: + request.addons_config = addons_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_addons_config, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_locations( + self, + request: cluster_service.SetLocationsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + locations: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the locations for a specific cluster. + + Args: + request (:class:`~.cluster_service.SetLocationsRequest`): + The request object. SetLocationsRequest sets the + locations of the cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + locations (:class:`Sequence[str]`): + Required. The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. Changing + the locations a cluster is in will result in nodes being + either created or removed from the cluster, depending on + whether locations are being added or removed. + + This list must always include the cluster's primary + zone. + This corresponds to the ``locations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, locations]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetLocationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if locations is not None: + request.locations = locations + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_locations, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_master( + self, + request: cluster_service.UpdateMasterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + master_version: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the master for a specific cluster. + + Args: + request (:class:`~.cluster_service.UpdateMasterRequest`): + The request object. UpdateMasterRequest updates the + master of the cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + master_version (:class:`str`): + Required. The Kubernetes version to + change the master to. + Users may specify either explicit + versions offered by Kubernetes Engine or + version aliases, which have the + following behavior: + - "latest": picks the highest valid + Kubernetes version - "1.X": picks the + highest valid patch+gke.N patch in the + 1.X version - "1.X.Y": picks the highest + valid gke.N patch in the 1.X.Y version - + "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the + default Kubernetes version + This corresponds to the ``master_version`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, master_version]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.UpdateMasterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if master_version is not None: + request.master_version = master_version + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.update_master, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_master_auth( + self, + request: cluster_service.SetMasterAuthRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets master auth materials. Currently supports + changing the admin password or a specific cluster, + either via password generation or explicitly setting the + password. + + Args: + request (:class:`~.cluster_service.SetMasterAuthRequest`): + The request object. SetMasterAuthRequest updates the + admin password of a cluster. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. 
+ + request = cluster_service.SetMasterAuthRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_master_auth, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_cluster( + self, + request: cluster_service.DeleteClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes the cluster, including the Kubernetes + endpoint and all worker nodes. + + Firewalls and routes that were configured during cluster + creation are also deleted. + + Other Google Compute Engine resources that might be in + use by the cluster, such as load balancer resources, are + not deleted if they weren't present when the cluster was + initially created. + + Args: + request (:class:`~.cluster_service.DeleteClusterRequest`): + The request object. DeleteClusterRequest deletes a + cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to delete. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_cluster, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_operations( + self, + request: cluster_service.ListOperationsRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListOperationsResponse: + r"""Lists all operations in a project in the specified + zone or all zones. + + Args: + request (:class:`~.cluster_service.ListOperationsRequest`): + The request object. ListOperationsRequest lists + operations. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + to return operations for, or ``-`` for all zones. This + field has been deprecated and replaced by the parent + field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ListOperationsResponse: + ListOperationsResponse is the result + of ListOperationsRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([project_id, zone]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListOperationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: cluster_service.GetOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Gets the specified operation. + + Args: + request (:class:`~.cluster_service.GetOperationRequest`): + The request object. GetOperationRequest gets a single + operation. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. 
This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (:class:`str`): + Required. Deprecated. The server-assigned ``name`` of + the operation. This field has been deprecated and + replaced by the name field. + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, operation_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.GetOperationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def cancel_operation( + self, + request: cluster_service.CancelOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels the specified operation. + + Args: + request (:class:`~.cluster_service.CancelOperationRequest`): + The request object. CancelOperationRequest cancels a + single operation. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the operation resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (:class:`str`): + Required. Deprecated. The server-assigned ``name`` of + the operation. This field has been deprecated and + replaced by the name field. + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, operation_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CancelOperationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def get_server_config( + self, + request: cluster_service.GetServerConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ServerConfig: + r"""Returns configuration info about the Google + Kubernetes Engine service. + + Args: + request (:class:`~.cluster_service.GetServerConfigRequest`): + The request object. Gets the current Kubernetes Engine + service configuration. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. 
+ + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone <https://cloud.google.com/compute/docs/zones#available>`__ + to return operations for. This field has been deprecated + and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ServerConfig: + Kubernetes Engine service + configuration. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.GetServerConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_server_config, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def list_node_pools( + self, + request: cluster_service.ListNodePoolsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListNodePoolsResponse: + r"""Lists the node pools for a cluster. + + Args: + request (:class:`~.cluster_service.ListNodePoolsRequest`): + The request object. ListNodePoolsRequest lists the node + pool(s) for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number <https://support.google.com/cloud/answer/6158840>`__. + This field has been deprecated and replaced by the + parent field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone <https://cloud.google.com/compute/docs/zones#available>`__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the parent field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ListNodePoolsResponse: + ListNodePoolsResponse is the result + of ListNodePoolsRequest. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListNodePoolsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_node_pools, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_node_pool( + self, + request: cluster_service.GetNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.NodePool: + r"""Retrieves the requested node pool. + + Args: + request (:class:`~.cluster_service.GetNodePoolRequest`): + The request object. GetNodePoolRequest retrieves a node + pool for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. 
+ This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Required. Deprecated. The name of the + node pool. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.NodePool: + NodePool contains the name and + configuration for a cluster's node pool. + Node pools are a set of nodes (i.e. + VM's), with a common configuration and + specification, under the control of the + cluster master. They may have a set of + Kubernetes labels applied to them, which + may be used to reference them during pod + scheduling. They may also be resized up + or down, to accommodate the workload. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([project_id, zone, cluster_id, node_pool_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.GetNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_node_pool( + self, + request: cluster_service.CreateNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool: cluster_service.NodePool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a node pool for a cluster. + + Args: + request (:class:`~.cluster_service.CreateNodePoolRequest`): + The request object. CreateNodePoolRequest creates a node + pool for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. 
+ This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the parent field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool (:class:`~.cluster_service.NodePool`): + Required. The node pool to create. + This corresponds to the ``node_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, node_pool]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CreateNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool is not None: + request.node_pool = node_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.create_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_node_pool( + self, + request: cluster_service.DeleteNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes a node pool from a cluster. + + Args: + request (:class:`~.cluster_service.DeleteNodePoolRequest`): + The request object. DeleteNodePoolRequest deletes a node + pool for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Required. Deprecated. The name of the + node pool to delete. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, node_pool_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.DeleteNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_node_pool, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def rollback_node_pool_upgrade( + self, + request: cluster_service.RollbackNodePoolUpgradeRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Rolls back a previously Aborted or Failed NodePool + upgrade. This makes no changes if the last upgrade + successfully completed. + + Args: + request (:class:`~.cluster_service.RollbackNodePoolUpgradeRequest`): + The request object. RollbackNodePoolUpgradeRequest + rollbacks the previously Aborted or Failed NodePool + upgrade. This will be an no-op if the last upgrade + successfully completed. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. 
The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to rollback. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Required. Deprecated. The name of the + node pool to rollback. This field has + been deprecated and replaced by the name + field. + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, node_pool_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.RollbackNodePoolUpgradeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.rollback_node_pool_upgrade, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_node_pool_management( + self, + request: cluster_service.SetNodePoolManagementRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + management: cluster_service.NodeManagement = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the NodeManagement options for a node pool. + + Args: + request (:class:`~.cluster_service.SetNodePoolManagementRequest`): + The request object. SetNodePoolManagementRequest sets + the node management properties of a node pool. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. 
This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to update. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Required. Deprecated. The name of the + node pool to update. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + management (:class:`~.cluster_service.NodeManagement`): + Required. NodeManagement + configuration for the node pool. + This corresponds to the ``management`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, node_pool_id, management] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = cluster_service.SetNodePoolManagementRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if management is not None: + request.management = management + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_node_pool_management, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_labels( + self, + request: cluster_service.SetLabelsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + resource_labels: Sequence[ + cluster_service.SetLabelsRequest.ResourceLabelsEntry + ] = None, + label_fingerprint: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets labels on a cluster. + + Args: + request (:class:`~.cluster_service.SetLabelsRequest`): + The request object. SetLabelsRequest sets the Google + Cloud Platform labels on a Google Container Engine + cluster, which will in turn set them for Google Compute + Engine resources used by that cluster + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. 
+ This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_labels (:class:`Sequence[~.cluster_service.SetLabelsRequest.ResourceLabelsEntry]`): + Required. The labels to set for that + cluster. + This corresponds to the ``resource_labels`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + label_fingerprint (:class:`str`): + Required. The fingerprint of the + previous set of labels for this + resource, used to detect conflicts. The + fingerprint is initially generated by + Kubernetes Engine and changes after + every request to modify or update + labels. You must always provide an up- + to-date fingerprint hash when updating + or changing labels. Make a + get() request to the + resource to get the latest fingerprint. + This corresponds to the ``label_fingerprint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. 
All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, resource_labels, label_fingerprint] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetLabelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if resource_labels is not None: + request.resource_labels = resource_labels + if label_fingerprint is not None: + request.label_fingerprint = label_fingerprint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_labels, default_timeout=None, client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_legacy_abac( + self, + request: cluster_service.SetLegacyAbacRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + enabled: bool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables the ABAC authorization mechanism + on a cluster. 
+ + Args: + request (:class:`~.cluster_service.SetLegacyAbacRequest`): + The request object. SetLegacyAbacRequest enables or + disables the ABAC authorization mechanism for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to update. This field has been + deprecated and replaced by the name + field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + enabled (:class:`bool`): + Required. Whether ABAC authorization + will be enabled in the cluster. + This corresponds to the ``enabled`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([project_id, zone, cluster_id, enabled]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetLegacyAbacRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if enabled is not None: + request.enabled = enabled + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_legacy_abac, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def start_ip_rotation( + self, + request: cluster_service.StartIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Starts master IP rotation. + + Args: + request (:class:`~.cluster_service.StartIPRotationRequest`): + The request object. StartIPRotationRequest creates a new + IP for the cluster and then performs a node upgrade on + each node pool to point to the new IP. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. 
+ This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.StartIPRotationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.start_ip_rotation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def complete_ip_rotation( + self, + request: cluster_service.CompleteIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Completes master IP rotation. + + Args: + request (:class:`~.cluster_service.CompleteIPRotationRequest`): + The request object. CompleteIPRotationRequest moves the + cluster master back into single-IP mode. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.CompleteIPRotationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.complete_ip_rotation, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def set_node_pool_size( + self, + request: cluster_service.SetNodePoolSizeRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the size for a specific node pool. + + Args: + request (:class:`~.cluster_service.SetNodePoolSizeRequest`): + The request object. SetNodePoolSizeRequest sets the size + a node pool. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + + request = cluster_service.SetNodePoolSizeRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_node_pool_size, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def set_network_policy( + self, + request: cluster_service.SetNetworkPolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + network_policy: cluster_service.NetworkPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables Network Policy for a cluster. + + Args: + request (:class:`~.cluster_service.SetNetworkPolicyRequest`): + The request object. SetNetworkPolicyRequest + enables/disables network policy for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_policy (:class:`~.cluster_service.NetworkPolicy`): + Required. Configuration options for + the NetworkPolicy feature. + This corresponds to the ``network_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, zone, cluster_id, network_policy]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetNetworkPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if network_policy is not None: + request.network_policy = network_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_network_policy, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def set_maintenance_policy( + self, + request: cluster_service.SetMaintenancePolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + maintenance_policy: cluster_service.MaintenancePolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the maintenance policy for a cluster. + + Args: + request (:class:`~.cluster_service.SetMaintenancePolicyRequest`): + The request object. SetMaintenancePolicyRequest sets the + maintenance policy for a cluster. + project_id (:class:`str`): + Required. The Google Developers Console `project ID or + project + number `__. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. The name of the cluster to + update. + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + maintenance_policy (:class:`~.cluster_service.MaintenancePolicy`): + Required. The maintenance policy to + be set for the cluster. An empty field + clears the existing maintenance policy. + This corresponds to the ``maintenance_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.cluster_service.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, zone, cluster_id, maintenance_policy] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.SetMaintenancePolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if maintenance_policy is not None: + request.maintenance_policy = maintenance_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_maintenance_policy, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_usable_subnetworks( + self, + request: cluster_service.ListUsableSubnetworksRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListUsableSubnetworksPager: + r"""Lists subnetworks that can be used for creating + clusters in a project. 
+ + Args: + request (:class:`~.cluster_service.ListUsableSubnetworksRequest`): + The request object. ListUsableSubnetworksRequest + requests the list of usable subnetworks. available to a + user for creating clusters. + parent (:class:`str`): + Required. The parent project where subnetworks are + usable. Specified in the format ``projects/*``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListUsableSubnetworksPager: + ListUsableSubnetworksResponse is the + response of + ListUsableSubnetworksRequest. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cluster_service.ListUsableSubnetworksRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_usable_subnetworks, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListUsableSubnetworksPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def list_locations( + self, + request: cluster_service.ListLocationsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListLocationsResponse: + r"""Fetches locations that offer Google Kubernetes + Engine. + + Args: + request (:class:`~.cluster_service.ListLocationsRequest`): + The request object. ListLocationsRequest is used to + request the locations that offer GKE. + parent (:class:`str`): + Required. Contains the name of the resource requested. + Specified in the format ``projects/*``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cluster_service.ListLocationsResponse: + ListLocationsResponse returns the + list of all GKE locations and their + recommendation state. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = cluster_service.ListLocationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-container",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("ClusterManagerClient",) diff --git a/google/cloud/container_v1beta1/services/cluster_manager/pagers.py b/google/cloud/container_v1beta1/services/cluster_manager/pagers.py new file mode 100644 index 00000000..cd8bb39d --- /dev/null +++ b/google/cloud/container_v1beta1/services/cluster_manager/pagers.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.container_v1beta1.types import cluster_service + + +class ListUsableSubnetworksPager: + """A pager for iterating through ``list_usable_subnetworks`` requests. + + This class thinly wraps an initial + :class:`~.cluster_service.ListUsableSubnetworksResponse` object, and + provides an ``__iter__`` method to iterate through its + ``subnetworks`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListUsableSubnetworks`` requests and continue to iterate + through the ``subnetworks`` field on the + corresponding responses. + + All the usual :class:`~.cluster_service.ListUsableSubnetworksResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., cluster_service.ListUsableSubnetworksResponse], + request: cluster_service.ListUsableSubnetworksRequest, + response: cluster_service.ListUsableSubnetworksResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.cluster_service.ListUsableSubnetworksRequest`): + The initial request object. + response (:class:`~.cluster_service.ListUsableSubnetworksResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = cluster_service.ListUsableSubnetworksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[cluster_service.ListUsableSubnetworksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[cluster_service.UsableSubnetwork]: + for page in self.pages: + yield from page.subnetworks + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListUsableSubnetworksAsyncPager: + """A pager for iterating through ``list_usable_subnetworks`` requests. + + This class thinly wraps an initial + :class:`~.cluster_service.ListUsableSubnetworksResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``subnetworks`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListUsableSubnetworks`` requests and continue to iterate + through the ``subnetworks`` field on the + corresponding responses. + + All the usual :class:`~.cluster_service.ListUsableSubnetworksResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[cluster_service.ListUsableSubnetworksResponse]], + request: cluster_service.ListUsableSubnetworksRequest, + response: cluster_service.ListUsableSubnetworksResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (:class:`~.cluster_service.ListUsableSubnetworksRequest`): + The initial request object. + response (:class:`~.cluster_service.ListUsableSubnetworksResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = cluster_service.ListUsableSubnetworksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[cluster_service.ListUsableSubnetworksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[cluster_service.UsableSubnetwork]: + async def async_generator(): + async for page in self.pages: + for response in page.subnetworks: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/container_v1beta1/services/cluster_manager/transports/__init__.py b/google/cloud/container_v1beta1/services/cluster_manager/transports/__init__.py new file mode 100644 index 00000000..71d01ec8 --- /dev/null +++ b/google/cloud/container_v1beta1/services/cluster_manager/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import ClusterManagerTransport +from .grpc import ClusterManagerGrpcTransport +from .grpc_asyncio import ClusterManagerGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterManagerTransport]] +_transport_registry["grpc"] = ClusterManagerGrpcTransport +_transport_registry["grpc_asyncio"] = ClusterManagerGrpcAsyncIOTransport + + +__all__ = ( + "ClusterManagerTransport", + "ClusterManagerGrpcTransport", + "ClusterManagerGrpcAsyncIOTransport", +) diff --git a/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py b/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py new file mode 100644 index 00000000..18d6e7c3 --- /dev/null +++ b/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py @@ -0,0 +1,435 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import typing + +from google import auth +from google.api_core import exceptions # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.container_v1beta1.types import cluster_service +from google.protobuf import empty_pb2 as empty # type: ignore + + +class ClusterManagerTransport(abc.ABC): + """Abstract transport class for ClusterManager.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "container.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes + ) + elif credentials is None: + credentials, _ = auth.default(scopes=scopes) + + # Save the credentials. 
+ self._credentials = credentials + + @property + def list_clusters( + self, + ) -> typing.Callable[ + [cluster_service.ListClustersRequest], + typing.Union[ + cluster_service.ListClustersResponse, + typing.Awaitable[cluster_service.ListClustersResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_cluster( + self, + ) -> typing.Callable[ + [cluster_service.GetClusterRequest], + typing.Union[ + cluster_service.Cluster, typing.Awaitable[cluster_service.Cluster] + ], + ]: + raise NotImplementedError() + + @property + def create_cluster( + self, + ) -> typing.Callable[ + [cluster_service.CreateClusterRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def update_cluster( + self, + ) -> typing.Callable[ + [cluster_service.UpdateClusterRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def update_node_pool( + self, + ) -> typing.Callable[ + [cluster_service.UpdateNodePoolRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_node_pool_autoscaling( + self, + ) -> typing.Callable[ + [cluster_service.SetNodePoolAutoscalingRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_logging_service( + self, + ) -> typing.Callable[ + [cluster_service.SetLoggingServiceRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_monitoring_service( + self, + ) -> typing.Callable[ + [cluster_service.SetMonitoringServiceRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise 
NotImplementedError() + + @property + def set_addons_config( + self, + ) -> typing.Callable[ + [cluster_service.SetAddonsConfigRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_locations( + self, + ) -> typing.Callable[ + [cluster_service.SetLocationsRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def update_master( + self, + ) -> typing.Callable[ + [cluster_service.UpdateMasterRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_master_auth( + self, + ) -> typing.Callable[ + [cluster_service.SetMasterAuthRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def delete_cluster( + self, + ) -> typing.Callable[ + [cluster_service.DeleteClusterRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> typing.Callable[ + [cluster_service.ListOperationsRequest], + typing.Union[ + cluster_service.ListOperationsResponse, + typing.Awaitable[cluster_service.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> typing.Callable[ + [cluster_service.GetOperationRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> typing.Callable[ + [cluster_service.CancelOperationRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def get_server_config( + self, + ) -> 
typing.Callable[ + [cluster_service.GetServerConfigRequest], + typing.Union[ + cluster_service.ServerConfig, typing.Awaitable[cluster_service.ServerConfig] + ], + ]: + raise NotImplementedError() + + @property + def list_node_pools( + self, + ) -> typing.Callable[ + [cluster_service.ListNodePoolsRequest], + typing.Union[ + cluster_service.ListNodePoolsResponse, + typing.Awaitable[cluster_service.ListNodePoolsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_node_pool( + self, + ) -> typing.Callable[ + [cluster_service.GetNodePoolRequest], + typing.Union[ + cluster_service.NodePool, typing.Awaitable[cluster_service.NodePool] + ], + ]: + raise NotImplementedError() + + @property + def create_node_pool( + self, + ) -> typing.Callable[ + [cluster_service.CreateNodePoolRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def delete_node_pool( + self, + ) -> typing.Callable[ + [cluster_service.DeleteNodePoolRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def rollback_node_pool_upgrade( + self, + ) -> typing.Callable[ + [cluster_service.RollbackNodePoolUpgradeRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_node_pool_management( + self, + ) -> typing.Callable[ + [cluster_service.SetNodePoolManagementRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_labels( + self, + ) -> typing.Callable[ + [cluster_service.SetLabelsRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_legacy_abac( + self, + ) -> typing.Callable[ 
+ [cluster_service.SetLegacyAbacRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def start_ip_rotation( + self, + ) -> typing.Callable[ + [cluster_service.StartIPRotationRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def complete_ip_rotation( + self, + ) -> typing.Callable[ + [cluster_service.CompleteIPRotationRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_node_pool_size( + self, + ) -> typing.Callable[ + [cluster_service.SetNodePoolSizeRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_network_policy( + self, + ) -> typing.Callable[ + [cluster_service.SetNetworkPolicyRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def set_maintenance_policy( + self, + ) -> typing.Callable[ + [cluster_service.SetMaintenancePolicyRequest], + typing.Union[ + cluster_service.Operation, typing.Awaitable[cluster_service.Operation] + ], + ]: + raise NotImplementedError() + + @property + def list_usable_subnetworks( + self, + ) -> typing.Callable[ + [cluster_service.ListUsableSubnetworksRequest], + typing.Union[ + cluster_service.ListUsableSubnetworksResponse, + typing.Awaitable[cluster_service.ListUsableSubnetworksResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> typing.Callable[ + [cluster_service.ListLocationsRequest], + typing.Union[ + cluster_service.ListLocationsResponse, + typing.Awaitable[cluster_service.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = 
("ClusterManagerTransport",) diff --git a/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py b/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py new file mode 100644 index 00000000..f1914169 --- /dev/null +++ b/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py @@ -0,0 +1,1090 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + + +import grpc # type: ignore + +from google.cloud.container_v1beta1.types import cluster_service +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import ClusterManagerTransport + + +class ClusterManagerGrpcTransport(ClusterManagerTransport): + """gRPC backend transport for ClusterManager. + + Google Kubernetes Engine Cluster Manager v1beta1 + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "container.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. 
+ credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default(scopes=self.AUTH_SCOPES) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + ) + + self._stubs = {} # type: Dict[str, Callable] + + @classmethod + def create_channel( + cls, + host: str = "container.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + **kwargs + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optionsl[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. 
These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def list_clusters( + self, + ) -> Callable[ + [cluster_service.ListClustersRequest], cluster_service.ListClustersResponse + ]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all clusters owned by a project in either the + specified zone or all zones. + + Returns: + Callable[[~.ListClustersRequest], + ~.ListClustersResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_clusters" not in self._stubs: + self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/ListClusters", + request_serializer=cluster_service.ListClustersRequest.serialize, + response_deserializer=cluster_service.ListClustersResponse.deserialize, + ) + return self._stubs["list_clusters"] + + @property + def get_cluster( + self, + ) -> Callable[[cluster_service.GetClusterRequest], cluster_service.Cluster]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the details for a specific cluster. + + Returns: + Callable[[~.GetClusterRequest], + ~.Cluster]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_cluster" not in self._stubs: + self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/GetCluster", + request_serializer=cluster_service.GetClusterRequest.serialize, + response_deserializer=cluster_service.Cluster.deserialize, + ) + return self._stubs["get_cluster"] + + @property + def create_cluster( + self, + ) -> Callable[[cluster_service.CreateClusterRequest], cluster_service.Operation]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. 
+ + Returns: + Callable[[~.CreateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/CreateCluster", + request_serializer=cluster_service.CreateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["create_cluster"] + + @property + def update_cluster( + self, + ) -> Callable[[cluster_service.UpdateClusterRequest], cluster_service.Operation]: + r"""Return a callable for the update cluster method over gRPC. + + Updates the settings for a specific cluster. + + Returns: + Callable[[~.UpdateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/UpdateCluster", + request_serializer=cluster_service.UpdateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["update_cluster"] + + @property + def update_node_pool( + self, + ) -> Callable[[cluster_service.UpdateNodePoolRequest], cluster_service.Operation]: + r"""Return a callable for the update node pool method over gRPC. + + Updates the version and/or image type of a specific + node pool. 
+ + Returns: + Callable[[~.UpdateNodePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_node_pool" not in self._stubs: + self._stubs["update_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/UpdateNodePool", + request_serializer=cluster_service.UpdateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["update_node_pool"] + + @property + def set_node_pool_autoscaling( + self, + ) -> Callable[ + [cluster_service.SetNodePoolAutoscalingRequest], cluster_service.Operation + ]: + r"""Return a callable for the set node pool autoscaling method over gRPC. + + Sets the autoscaling settings of a specific node + pool. + + Returns: + Callable[[~.SetNodePoolAutoscalingRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_node_pool_autoscaling" not in self._stubs: + self._stubs["set_node_pool_autoscaling"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetNodePoolAutoscaling", + request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_node_pool_autoscaling"] + + @property + def set_logging_service( + self, + ) -> Callable[ + [cluster_service.SetLoggingServiceRequest], cluster_service.Operation + ]: + r"""Return a callable for the set logging service method over gRPC. + + Sets the logging service for a specific cluster. 
+ + Returns: + Callable[[~.SetLoggingServiceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_logging_service" not in self._stubs: + self._stubs["set_logging_service"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetLoggingService", + request_serializer=cluster_service.SetLoggingServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_logging_service"] + + @property + def set_monitoring_service( + self, + ) -> Callable[ + [cluster_service.SetMonitoringServiceRequest], cluster_service.Operation + ]: + r"""Return a callable for the set monitoring service method over gRPC. + + Sets the monitoring service for a specific cluster. + + Returns: + Callable[[~.SetMonitoringServiceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_monitoring_service" not in self._stubs: + self._stubs["set_monitoring_service"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetMonitoringService", + request_serializer=cluster_service.SetMonitoringServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_monitoring_service"] + + @property + def set_addons_config( + self, + ) -> Callable[[cluster_service.SetAddonsConfigRequest], cluster_service.Operation]: + r"""Return a callable for the set addons config method over gRPC. + + Sets the addons for a specific cluster. 
+ + Returns: + Callable[[~.SetAddonsConfigRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_addons_config" not in self._stubs: + self._stubs["set_addons_config"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetAddonsConfig", + request_serializer=cluster_service.SetAddonsConfigRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_addons_config"] + + @property + def set_locations( + self, + ) -> Callable[[cluster_service.SetLocationsRequest], cluster_service.Operation]: + r"""Return a callable for the set locations method over gRPC. + + Sets the locations for a specific cluster. + + Returns: + Callable[[~.SetLocationsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_locations" not in self._stubs: + self._stubs["set_locations"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetLocations", + request_serializer=cluster_service.SetLocationsRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_locations"] + + @property + def update_master( + self, + ) -> Callable[[cluster_service.UpdateMasterRequest], cluster_service.Operation]: + r"""Return a callable for the update master method over gRPC. + + Updates the master for a specific cluster. + + Returns: + Callable[[~.UpdateMasterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_master" not in self._stubs: + self._stubs["update_master"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/UpdateMaster", + request_serializer=cluster_service.UpdateMasterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["update_master"] + + @property + def set_master_auth( + self, + ) -> Callable[[cluster_service.SetMasterAuthRequest], cluster_service.Operation]: + r"""Return a callable for the set master auth method over gRPC. + + Sets master auth materials. Currently supports + changing the admin password or a specific cluster, + either via password generation or explicitly setting the + password. + + Returns: + Callable[[~.SetMasterAuthRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_master_auth" not in self._stubs: + self._stubs["set_master_auth"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetMasterAuth", + request_serializer=cluster_service.SetMasterAuthRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_master_auth"] + + @property + def delete_cluster( + self, + ) -> Callable[[cluster_service.DeleteClusterRequest], cluster_service.Operation]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes the cluster, including the Kubernetes + endpoint and all worker nodes. + + Firewalls and routes that were configured during cluster + creation are also deleted. 
+ + Other Google Compute Engine resources that might be in + use by the cluster, such as load balancer resources, are + not deleted if they weren't present when the cluster was + initially created. + + Returns: + Callable[[~.DeleteClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/DeleteCluster", + request_serializer=cluster_service.DeleteClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["delete_cluster"] + + @property + def list_operations( + self, + ) -> Callable[ + [cluster_service.ListOperationsRequest], cluster_service.ListOperationsResponse + ]: + r"""Return a callable for the list operations method over gRPC. + + Lists all operations in a project in the specified + zone or all zones. + + Returns: + Callable[[~.ListOperationsRequest], + ~.ListOperationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/ListOperations", + request_serializer=cluster_service.ListOperationsRequest.serialize, + response_deserializer=cluster_service.ListOperationsResponse.deserialize, + ) + return self._stubs["list_operations"] + + @property + def get_operation( + self, + ) -> Callable[[cluster_service.GetOperationRequest], cluster_service.Operation]: + r"""Return a callable for the get operation method over gRPC. + + Gets the specified operation. + + Returns: + Callable[[~.GetOperationRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/GetOperation", + request_serializer=cluster_service.GetOperationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["get_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[cluster_service.CancelOperationRequest], empty.Empty]: + r"""Return a callable for the cancel operation method over gRPC. + + Cancels the specified operation. + + Returns: + Callable[[~.CancelOperationRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/CancelOperation", + request_serializer=cluster_service.CancelOperationRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["cancel_operation"] + + @property + def get_server_config( + self, + ) -> Callable[ + [cluster_service.GetServerConfigRequest], cluster_service.ServerConfig + ]: + r"""Return a callable for the get server config method over gRPC. + + Returns configuration info about the Google + Kubernetes Engine service. + + Returns: + Callable[[~.GetServerConfigRequest], + ~.ServerConfig]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_server_config" not in self._stubs: + self._stubs["get_server_config"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/GetServerConfig", + request_serializer=cluster_service.GetServerConfigRequest.serialize, + response_deserializer=cluster_service.ServerConfig.deserialize, + ) + return self._stubs["get_server_config"] + + @property + def list_node_pools( + self, + ) -> Callable[ + [cluster_service.ListNodePoolsRequest], cluster_service.ListNodePoolsResponse + ]: + r"""Return a callable for the list node pools method over gRPC. + + Lists the node pools for a cluster. + + Returns: + Callable[[~.ListNodePoolsRequest], + ~.ListNodePoolsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_node_pools" not in self._stubs: + self._stubs["list_node_pools"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/ListNodePools", + request_serializer=cluster_service.ListNodePoolsRequest.serialize, + response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, + ) + return self._stubs["list_node_pools"] + + @property + def get_node_pool( + self, + ) -> Callable[[cluster_service.GetNodePoolRequest], cluster_service.NodePool]: + r"""Return a callable for the get node pool method over gRPC. + + Retrieves the requested node pool. + + Returns: + Callable[[~.GetNodePoolRequest], + ~.NodePool]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_node_pool" not in self._stubs: + self._stubs["get_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/GetNodePool", + request_serializer=cluster_service.GetNodePoolRequest.serialize, + response_deserializer=cluster_service.NodePool.deserialize, + ) + return self._stubs["get_node_pool"] + + @property + def create_node_pool( + self, + ) -> Callable[[cluster_service.CreateNodePoolRequest], cluster_service.Operation]: + r"""Return a callable for the create node pool method over gRPC. + + Creates a node pool for a cluster. + + Returns: + Callable[[~.CreateNodePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_node_pool" not in self._stubs: + self._stubs["create_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/CreateNodePool", + request_serializer=cluster_service.CreateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["create_node_pool"] + + @property + def delete_node_pool( + self, + ) -> Callable[[cluster_service.DeleteNodePoolRequest], cluster_service.Operation]: + r"""Return a callable for the delete node pool method over gRPC. + + Deletes a node pool from a cluster. + + Returns: + Callable[[~.DeleteNodePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_node_pool" not in self._stubs: + self._stubs["delete_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/DeleteNodePool", + request_serializer=cluster_service.DeleteNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["delete_node_pool"] + + @property + def rollback_node_pool_upgrade( + self, + ) -> Callable[ + [cluster_service.RollbackNodePoolUpgradeRequest], cluster_service.Operation + ]: + r"""Return a callable for the rollback node pool upgrade method over gRPC. + + Rolls back a previously Aborted or Failed NodePool + upgrade. This makes no changes if the last upgrade + successfully completed. + + Returns: + Callable[[~.RollbackNodePoolUpgradeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "rollback_node_pool_upgrade" not in self._stubs: + self._stubs["rollback_node_pool_upgrade"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/RollbackNodePoolUpgrade", + request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["rollback_node_pool_upgrade"] + + @property + def set_node_pool_management( + self, + ) -> Callable[ + [cluster_service.SetNodePoolManagementRequest], cluster_service.Operation + ]: + r"""Return a callable for the set node pool management method over gRPC. + + Sets the NodeManagement options for a node pool. + + Returns: + Callable[[~.SetNodePoolManagementRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_node_pool_management" not in self._stubs: + self._stubs["set_node_pool_management"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetNodePoolManagement", + request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_node_pool_management"] + + @property + def set_labels( + self, + ) -> Callable[[cluster_service.SetLabelsRequest], cluster_service.Operation]: + r"""Return a callable for the set labels method over gRPC. + + Sets labels on a cluster. + + Returns: + Callable[[~.SetLabelsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_labels" not in self._stubs: + self._stubs["set_labels"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetLabels", + request_serializer=cluster_service.SetLabelsRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_labels"] + + @property + def set_legacy_abac( + self, + ) -> Callable[[cluster_service.SetLegacyAbacRequest], cluster_service.Operation]: + r"""Return a callable for the set legacy abac method over gRPC. + + Enables or disables the ABAC authorization mechanism + on a cluster. + + Returns: + Callable[[~.SetLegacyAbacRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_legacy_abac" not in self._stubs: + self._stubs["set_legacy_abac"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetLegacyAbac", + request_serializer=cluster_service.SetLegacyAbacRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_legacy_abac"] + + @property + def start_ip_rotation( + self, + ) -> Callable[[cluster_service.StartIPRotationRequest], cluster_service.Operation]: + r"""Return a callable for the start ip rotation method over gRPC. + + Starts master IP rotation. + + Returns: + Callable[[~.StartIPRotationRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "start_ip_rotation" not in self._stubs: + self._stubs["start_ip_rotation"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/StartIPRotation", + request_serializer=cluster_service.StartIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["start_ip_rotation"] + + @property + def complete_ip_rotation( + self, + ) -> Callable[ + [cluster_service.CompleteIPRotationRequest], cluster_service.Operation + ]: + r"""Return a callable for the complete ip rotation method over gRPC. + + Completes master IP rotation. + + Returns: + Callable[[~.CompleteIPRotationRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "complete_ip_rotation" not in self._stubs: + self._stubs["complete_ip_rotation"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/CompleteIPRotation", + request_serializer=cluster_service.CompleteIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["complete_ip_rotation"] + + @property + def set_node_pool_size( + self, + ) -> Callable[[cluster_service.SetNodePoolSizeRequest], cluster_service.Operation]: + r"""Return a callable for the set node pool size method over gRPC. + + Sets the size for a specific node pool. + + Returns: + Callable[[~.SetNodePoolSizeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_node_pool_size" not in self._stubs: + self._stubs["set_node_pool_size"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetNodePoolSize", + request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_node_pool_size"] + + @property + def set_network_policy( + self, + ) -> Callable[[cluster_service.SetNetworkPolicyRequest], cluster_service.Operation]: + r"""Return a callable for the set network policy method over gRPC. + + Enables or disables Network Policy for a cluster. + + Returns: + Callable[[~.SetNetworkPolicyRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_network_policy" not in self._stubs: + self._stubs["set_network_policy"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetNetworkPolicy", + request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_network_policy"] + + @property + def set_maintenance_policy( + self, + ) -> Callable[ + [cluster_service.SetMaintenancePolicyRequest], cluster_service.Operation + ]: + r"""Return a callable for the set maintenance policy method over gRPC. + + Sets the maintenance policy for a cluster. + + Returns: + Callable[[~.SetMaintenancePolicyRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_maintenance_policy" not in self._stubs: + self._stubs["set_maintenance_policy"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetMaintenancePolicy", + request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_maintenance_policy"] + + @property + def list_usable_subnetworks( + self, + ) -> Callable[ + [cluster_service.ListUsableSubnetworksRequest], + cluster_service.ListUsableSubnetworksResponse, + ]: + r"""Return a callable for the list usable subnetworks method over gRPC. + + Lists subnetworks that can be used for creating + clusters in a project. + + Returns: + Callable[[~.ListUsableSubnetworksRequest], + ~.ListUsableSubnetworksResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_usable_subnetworks" not in self._stubs: + self._stubs["list_usable_subnetworks"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/ListUsableSubnetworks", + request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, + response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, + ) + return self._stubs["list_usable_subnetworks"] + + @property + def list_locations( + self, + ) -> Callable[ + [cluster_service.ListLocationsRequest], cluster_service.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC. + + Fetches locations that offer Google Kubernetes + Engine. + + Returns: + Callable[[~.ListLocationsRequest], + ~.ListLocationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/ListLocations", + request_serializer=cluster_service.ListLocationsRequest.serialize, + response_deserializer=cluster_service.ListLocationsResponse.deserialize, + ) + return self._stubs["list_locations"] + + +__all__ = ("ClusterManagerGrpcTransport",) diff --git a/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py b/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py new file mode 100644 index 00000000..a53feaa3 --- /dev/null +++ b/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py @@ -0,0 +1,1132 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers_async # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.container_v1beta1.types import cluster_service +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import ClusterManagerTransport +from .grpc import ClusterManagerGrpcTransport + + +class ClusterManagerGrpcAsyncIOTransport(ClusterManagerTransport): + """gRPC AsyncIO backend transport for ClusterManager. + + Google Kubernetes Engine Cluster Manager v1beta1 + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "container.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + **kwargs + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. 
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + **kwargs + ) + + def __init__( + self, + *, + host: str = "container.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint.
If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel.
+ """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def list_clusters( + self, + ) -> Callable[ + [cluster_service.ListClustersRequest], + Awaitable[cluster_service.ListClustersResponse], + ]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all clusters owned by a project in either the + specified zone or all zones. + + Returns: + Callable[[~.ListClustersRequest], + Awaitable[~.ListClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_clusters" not in self._stubs: + self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/ListClusters", + request_serializer=cluster_service.ListClustersRequest.serialize, + response_deserializer=cluster_service.ListClustersResponse.deserialize, + ) + return self._stubs["list_clusters"] + + @property + def get_cluster( + self, + ) -> Callable[ + [cluster_service.GetClusterRequest], Awaitable[cluster_service.Cluster] + ]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the details for a specific cluster. + + Returns: + Callable[[~.GetClusterRequest], + Awaitable[~.Cluster]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_cluster" not in self._stubs: + self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/GetCluster", + request_serializer=cluster_service.GetClusterRequest.serialize, + response_deserializer=cluster_service.Cluster.deserialize, + ) + return self._stubs["get_cluster"] + + @property + def create_cluster( + self, + ) -> Callable[ + [cluster_service.CreateClusterRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. + + Returns: + Callable[[~.CreateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/CreateCluster", + request_serializer=cluster_service.CreateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["create_cluster"] + + @property + def update_cluster( + self, + ) -> Callable[ + [cluster_service.UpdateClusterRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the update cluster method over gRPC. + + Updates the settings for a specific cluster. 
+ + Returns: + Callable[[~.UpdateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/UpdateCluster", + request_serializer=cluster_service.UpdateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["update_cluster"] + + @property + def update_node_pool( + self, + ) -> Callable[ + [cluster_service.UpdateNodePoolRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the update node pool method over gRPC. + + Updates the version and/or image type of a specific + node pool. + + Returns: + Callable[[~.UpdateNodePoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_node_pool" not in self._stubs: + self._stubs["update_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/UpdateNodePool", + request_serializer=cluster_service.UpdateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["update_node_pool"] + + @property + def set_node_pool_autoscaling( + self, + ) -> Callable[ + [cluster_service.SetNodePoolAutoscalingRequest], + Awaitable[cluster_service.Operation], + ]: + r"""Return a callable for the set node pool autoscaling method over gRPC. + + Sets the autoscaling settings of a specific node + pool. 
+ + Returns: + Callable[[~.SetNodePoolAutoscalingRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_node_pool_autoscaling" not in self._stubs: + self._stubs["set_node_pool_autoscaling"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetNodePoolAutoscaling", + request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_node_pool_autoscaling"] + + @property + def set_logging_service( + self, + ) -> Callable[ + [cluster_service.SetLoggingServiceRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set logging service method over gRPC. + + Sets the logging service for a specific cluster. + + Returns: + Callable[[~.SetLoggingServiceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_logging_service" not in self._stubs: + self._stubs["set_logging_service"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetLoggingService", + request_serializer=cluster_service.SetLoggingServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_logging_service"] + + @property + def set_monitoring_service( + self, + ) -> Callable[ + [cluster_service.SetMonitoringServiceRequest], + Awaitable[cluster_service.Operation], + ]: + r"""Return a callable for the set monitoring service method over gRPC. 
+ + Sets the monitoring service for a specific cluster. + + Returns: + Callable[[~.SetMonitoringServiceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_monitoring_service" not in self._stubs: + self._stubs["set_monitoring_service"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetMonitoringService", + request_serializer=cluster_service.SetMonitoringServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_monitoring_service"] + + @property + def set_addons_config( + self, + ) -> Callable[ + [cluster_service.SetAddonsConfigRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set addons config method over gRPC. + + Sets the addons for a specific cluster. + + Returns: + Callable[[~.SetAddonsConfigRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_addons_config" not in self._stubs: + self._stubs["set_addons_config"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetAddonsConfig", + request_serializer=cluster_service.SetAddonsConfigRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_addons_config"] + + @property + def set_locations( + self, + ) -> Callable[ + [cluster_service.SetLocationsRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set locations method over gRPC. 
+ + Sets the locations for a specific cluster. + + Returns: + Callable[[~.SetLocationsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_locations" not in self._stubs: + self._stubs["set_locations"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetLocations", + request_serializer=cluster_service.SetLocationsRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_locations"] + + @property + def update_master( + self, + ) -> Callable[ + [cluster_service.UpdateMasterRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the update master method over gRPC. + + Updates the master for a specific cluster. + + Returns: + Callable[[~.UpdateMasterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_master" not in self._stubs: + self._stubs["update_master"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/UpdateMaster", + request_serializer=cluster_service.UpdateMasterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["update_master"] + + @property + def set_master_auth( + self, + ) -> Callable[ + [cluster_service.SetMasterAuthRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set master auth method over gRPC. + + Sets master auth materials. 
Currently supports + changing the admin password or a specific cluster, + either via password generation or explicitly setting the + password. + + Returns: + Callable[[~.SetMasterAuthRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_master_auth" not in self._stubs: + self._stubs["set_master_auth"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetMasterAuth", + request_serializer=cluster_service.SetMasterAuthRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_master_auth"] + + @property + def delete_cluster( + self, + ) -> Callable[ + [cluster_service.DeleteClusterRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes the cluster, including the Kubernetes + endpoint and all worker nodes. + + Firewalls and routes that were configured during cluster + creation are also deleted. + + Other Google Compute Engine resources that might be in + use by the cluster, such as load balancer resources, are + not deleted if they weren't present when the cluster was + initially created. + + Returns: + Callable[[~.DeleteClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/DeleteCluster", + request_serializer=cluster_service.DeleteClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["delete_cluster"] + + @property + def list_operations( + self, + ) -> Callable[ + [cluster_service.ListOperationsRequest], + Awaitable[cluster_service.ListOperationsResponse], + ]: + r"""Return a callable for the list operations method over gRPC. + + Lists all operations in a project in the specified + zone or all zones. + + Returns: + Callable[[~.ListOperationsRequest], + Awaitable[~.ListOperationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/ListOperations", + request_serializer=cluster_service.ListOperationsRequest.serialize, + response_deserializer=cluster_service.ListOperationsResponse.deserialize, + ) + return self._stubs["list_operations"] + + @property + def get_operation( + self, + ) -> Callable[ + [cluster_service.GetOperationRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the get operation method over gRPC. + + Gets the specified operation. + + Returns: + Callable[[~.GetOperationRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/GetOperation", + request_serializer=cluster_service.GetOperationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["get_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[cluster_service.CancelOperationRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the cancel operation method over gRPC. + + Cancels the specified operation. + + Returns: + Callable[[~.CancelOperationRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/CancelOperation", + request_serializer=cluster_service.CancelOperationRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["cancel_operation"] + + @property + def get_server_config( + self, + ) -> Callable[ + [cluster_service.GetServerConfigRequest], + Awaitable[cluster_service.ServerConfig], + ]: + r"""Return a callable for the get server config method over gRPC. + + Returns configuration info about the Google + Kubernetes Engine service. + + Returns: + Callable[[~.GetServerConfigRequest], + Awaitable[~.ServerConfig]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_server_config" not in self._stubs: + self._stubs["get_server_config"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/GetServerConfig", + request_serializer=cluster_service.GetServerConfigRequest.serialize, + response_deserializer=cluster_service.ServerConfig.deserialize, + ) + return self._stubs["get_server_config"] + + @property + def list_node_pools( + self, + ) -> Callable[ + [cluster_service.ListNodePoolsRequest], + Awaitable[cluster_service.ListNodePoolsResponse], + ]: + r"""Return a callable for the list node pools method over gRPC. + + Lists the node pools for a cluster. + + Returns: + Callable[[~.ListNodePoolsRequest], + Awaitable[~.ListNodePoolsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_node_pools" not in self._stubs: + self._stubs["list_node_pools"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/ListNodePools", + request_serializer=cluster_service.ListNodePoolsRequest.serialize, + response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, + ) + return self._stubs["list_node_pools"] + + @property + def get_node_pool( + self, + ) -> Callable[ + [cluster_service.GetNodePoolRequest], Awaitable[cluster_service.NodePool] + ]: + r"""Return a callable for the get node pool method over gRPC. + + Retrieves the requested node pool. + + Returns: + Callable[[~.GetNodePoolRequest], + Awaitable[~.NodePool]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_node_pool" not in self._stubs: + self._stubs["get_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/GetNodePool", + request_serializer=cluster_service.GetNodePoolRequest.serialize, + response_deserializer=cluster_service.NodePool.deserialize, + ) + return self._stubs["get_node_pool"] + + @property + def create_node_pool( + self, + ) -> Callable[ + [cluster_service.CreateNodePoolRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the create node pool method over gRPC. + + Creates a node pool for a cluster. + + Returns: + Callable[[~.CreateNodePoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_node_pool" not in self._stubs: + self._stubs["create_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/CreateNodePool", + request_serializer=cluster_service.CreateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["create_node_pool"] + + @property + def delete_node_pool( + self, + ) -> Callable[ + [cluster_service.DeleteNodePoolRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the delete node pool method over gRPC. + + Deletes a node pool from a cluster. + + Returns: + Callable[[~.DeleteNodePoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_node_pool" not in self._stubs: + self._stubs["delete_node_pool"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/DeleteNodePool", + request_serializer=cluster_service.DeleteNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["delete_node_pool"] + + @property + def rollback_node_pool_upgrade( + self, + ) -> Callable[ + [cluster_service.RollbackNodePoolUpgradeRequest], + Awaitable[cluster_service.Operation], + ]: + r"""Return a callable for the rollback node pool upgrade method over gRPC. + + Rolls back a previously Aborted or Failed NodePool + upgrade. This makes no changes if the last upgrade + successfully completed. + + Returns: + Callable[[~.RollbackNodePoolUpgradeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "rollback_node_pool_upgrade" not in self._stubs: + self._stubs["rollback_node_pool_upgrade"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/RollbackNodePoolUpgrade", + request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["rollback_node_pool_upgrade"] + + @property + def set_node_pool_management( + self, + ) -> Callable[ + [cluster_service.SetNodePoolManagementRequest], + Awaitable[cluster_service.Operation], + ]: + r"""Return a callable for the set node pool management method over gRPC. + + Sets the NodeManagement options for a node pool. + + Returns: + Callable[[~.SetNodePoolManagementRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_node_pool_management" not in self._stubs: + self._stubs["set_node_pool_management"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetNodePoolManagement", + request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_node_pool_management"] + + @property + def set_labels( + self, + ) -> Callable[ + [cluster_service.SetLabelsRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set labels method over gRPC. + + Sets labels on a cluster. + + Returns: + Callable[[~.SetLabelsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_labels" not in self._stubs: + self._stubs["set_labels"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetLabels", + request_serializer=cluster_service.SetLabelsRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_labels"] + + @property + def set_legacy_abac( + self, + ) -> Callable[ + [cluster_service.SetLegacyAbacRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set legacy abac method over gRPC. + + Enables or disables the ABAC authorization mechanism + on a cluster. + + Returns: + Callable[[~.SetLegacyAbacRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_legacy_abac" not in self._stubs: + self._stubs["set_legacy_abac"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetLegacyAbac", + request_serializer=cluster_service.SetLegacyAbacRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_legacy_abac"] + + @property + def start_ip_rotation( + self, + ) -> Callable[ + [cluster_service.StartIPRotationRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the start ip rotation method over gRPC. + + Starts master IP rotation. + + Returns: + Callable[[~.StartIPRotationRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "start_ip_rotation" not in self._stubs: + self._stubs["start_ip_rotation"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/StartIPRotation", + request_serializer=cluster_service.StartIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["start_ip_rotation"] + + @property + def complete_ip_rotation( + self, + ) -> Callable[ + [cluster_service.CompleteIPRotationRequest], + Awaitable[cluster_service.Operation], + ]: + r"""Return a callable for the complete ip rotation method over gRPC. + + Completes master IP rotation. + + Returns: + Callable[[~.CompleteIPRotationRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "complete_ip_rotation" not in self._stubs: + self._stubs["complete_ip_rotation"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/CompleteIPRotation", + request_serializer=cluster_service.CompleteIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["complete_ip_rotation"] + + @property + def set_node_pool_size( + self, + ) -> Callable[ + [cluster_service.SetNodePoolSizeRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set node pool size method over gRPC. + + Sets the size for a specific node pool. + + Returns: + Callable[[~.SetNodePoolSizeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_node_pool_size" not in self._stubs: + self._stubs["set_node_pool_size"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetNodePoolSize", + request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_node_pool_size"] + + @property + def set_network_policy( + self, + ) -> Callable[ + [cluster_service.SetNetworkPolicyRequest], Awaitable[cluster_service.Operation] + ]: + r"""Return a callable for the set network policy method over gRPC. + + Enables or disables Network Policy for a cluster. + + Returns: + Callable[[~.SetNetworkPolicyRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_network_policy" not in self._stubs: + self._stubs["set_network_policy"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetNetworkPolicy", + request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_network_policy"] + + @property + def set_maintenance_policy( + self, + ) -> Callable[ + [cluster_service.SetMaintenancePolicyRequest], + Awaitable[cluster_service.Operation], + ]: + r"""Return a callable for the set maintenance policy method over gRPC. + + Sets the maintenance policy for a cluster. + + Returns: + Callable[[~.SetMaintenancePolicyRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_maintenance_policy" not in self._stubs: + self._stubs["set_maintenance_policy"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/SetMaintenancePolicy", + request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs["set_maintenance_policy"] + + @property + def list_usable_subnetworks( + self, + ) -> Callable[ + [cluster_service.ListUsableSubnetworksRequest], + Awaitable[cluster_service.ListUsableSubnetworksResponse], + ]: + r"""Return a callable for the list usable subnetworks method over gRPC. + + Lists subnetworks that can be used for creating + clusters in a project. 
+ + Returns: + Callable[[~.ListUsableSubnetworksRequest], + Awaitable[~.ListUsableSubnetworksResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_usable_subnetworks" not in self._stubs: + self._stubs["list_usable_subnetworks"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/ListUsableSubnetworks", + request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, + response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, + ) + return self._stubs["list_usable_subnetworks"] + + @property + def list_locations( + self, + ) -> Callable[ + [cluster_service.ListLocationsRequest], + Awaitable[cluster_service.ListLocationsResponse], + ]: + r"""Return a callable for the list locations method over gRPC. + + Fetches locations that offer Google Kubernetes + Engine. + + Returns: + Callable[[~.ListLocationsRequest], + Awaitable[~.ListLocationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.container.v1beta1.ClusterManager/ListLocations", + request_serializer=cluster_service.ListLocationsRequest.serialize, + response_deserializer=cluster_service.ListLocationsResponse.deserialize, + ) + return self._stubs["list_locations"] + + +__all__ = ("ClusterManagerGrpcAsyncIOTransport",) diff --git a/google/cloud/container_v1beta1/types.py b/google/cloud/container_v1beta1/types.py deleted file mode 100644 index d239cc93..00000000 --- a/google/cloud/container_v1beta1/types.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.container_v1beta1.proto import cluster_service_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import timestamp_pb2 - - -_shared_modules = [ - empty_pb2, - timestamp_pb2, -] - -_local_modules = [ - cluster_service_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.container_v1beta1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/container_v1beta1/types/__init__.py b/google/cloud/container_v1beta1/types/__init__.py new file mode 100644 index 00000000..05ef29f6 --- /dev/null +++ b/google/cloud/container_v1beta1/types/__init__.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .cluster_service import ( + NodeConfig, + ShieldedInstanceConfig, + NodeTaint, + MasterAuth, + ClientCertificateConfig, + AddonsConfig, + HttpLoadBalancing, + HorizontalPodAutoscaling, + KubernetesDashboard, + NetworkPolicyConfig, + PrivateClusterConfig, + IstioConfig, + CloudRunConfig, + MasterAuthorizedNetworksConfig, + LegacyAbac, + NetworkPolicy, + IPAllocationPolicy, + BinaryAuthorization, + PodSecurityPolicyConfig, + AuthenticatorGroupsConfig, + Cluster, + ClusterUpdate, + Operation, + OperationProgress, + CreateClusterRequest, + GetClusterRequest, + UpdateClusterRequest, + UpdateNodePoolRequest, + SetNodePoolAutoscalingRequest, + SetLoggingServiceRequest, + SetMonitoringServiceRequest, + SetAddonsConfigRequest, + SetLocationsRequest, + UpdateMasterRequest, + SetMasterAuthRequest, + DeleteClusterRequest, + ListClustersRequest, + ListClustersResponse, + GetOperationRequest, + ListOperationsRequest, + CancelOperationRequest, + ListOperationsResponse, + GetServerConfigRequest, + ServerConfig, + CreateNodePoolRequest, + DeleteNodePoolRequest, + ListNodePoolsRequest, + GetNodePoolRequest, + NodePool, + NodeManagement, + AutoUpgradeOptions, + MaintenancePolicy, + MaintenanceWindow, + TimeWindow, + RecurringTimeWindow, + DailyMaintenanceWindow, + SetNodePoolManagementRequest, + SetNodePoolSizeRequest, + RollbackNodePoolUpgradeRequest, + ListNodePoolsResponse, + ClusterAutoscaling, + AutoprovisioningNodePoolDefaults, + ResourceLimit, + NodePoolAutoscaling, + SetLabelsRequest, + SetLegacyAbacRequest, + StartIPRotationRequest, + CompleteIPRotationRequest, + AcceleratorConfig, + WorkloadMetadataConfig, + SetNetworkPolicyRequest, + SetMaintenancePolicyRequest, + ListLocationsRequest, + ListLocationsResponse, + Location, + StatusCondition, + NetworkConfig, + ListUsableSubnetworksRequest, + ListUsableSubnetworksResponse, + UsableSubnetworkSecondaryRange, + UsableSubnetwork, + VerticalPodAutoscaling, + IntraNodeVisibilityConfig, + MaxPodsConstraint, + 
DatabaseEncryption, + ResourceUsageExportConfig, +) + + +__all__ = ( + "NodeConfig", + "ShieldedInstanceConfig", + "NodeTaint", + "MasterAuth", + "ClientCertificateConfig", + "AddonsConfig", + "HttpLoadBalancing", + "HorizontalPodAutoscaling", + "KubernetesDashboard", + "NetworkPolicyConfig", + "PrivateClusterConfig", + "IstioConfig", + "CloudRunConfig", + "MasterAuthorizedNetworksConfig", + "LegacyAbac", + "NetworkPolicy", + "IPAllocationPolicy", + "BinaryAuthorization", + "PodSecurityPolicyConfig", + "AuthenticatorGroupsConfig", + "Cluster", + "ClusterUpdate", + "Operation", + "OperationProgress", + "CreateClusterRequest", + "GetClusterRequest", + "UpdateClusterRequest", + "UpdateNodePoolRequest", + "SetNodePoolAutoscalingRequest", + "SetLoggingServiceRequest", + "SetMonitoringServiceRequest", + "SetAddonsConfigRequest", + "SetLocationsRequest", + "UpdateMasterRequest", + "SetMasterAuthRequest", + "DeleteClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "GetOperationRequest", + "ListOperationsRequest", + "CancelOperationRequest", + "ListOperationsResponse", + "GetServerConfigRequest", + "ServerConfig", + "CreateNodePoolRequest", + "DeleteNodePoolRequest", + "ListNodePoolsRequest", + "GetNodePoolRequest", + "NodePool", + "NodeManagement", + "AutoUpgradeOptions", + "MaintenancePolicy", + "MaintenanceWindow", + "TimeWindow", + "RecurringTimeWindow", + "DailyMaintenanceWindow", + "SetNodePoolManagementRequest", + "SetNodePoolSizeRequest", + "RollbackNodePoolUpgradeRequest", + "ListNodePoolsResponse", + "ClusterAutoscaling", + "AutoprovisioningNodePoolDefaults", + "ResourceLimit", + "NodePoolAutoscaling", + "SetLabelsRequest", + "SetLegacyAbacRequest", + "StartIPRotationRequest", + "CompleteIPRotationRequest", + "AcceleratorConfig", + "WorkloadMetadataConfig", + "SetNetworkPolicyRequest", + "SetMaintenancePolicyRequest", + "ListLocationsRequest", + "ListLocationsResponse", + "Location", + "StatusCondition", + "NetworkConfig", + 
"ListUsableSubnetworksRequest", + "ListUsableSubnetworksResponse", + "UsableSubnetworkSecondaryRange", + "UsableSubnetwork", + "VerticalPodAutoscaling", + "IntraNodeVisibilityConfig", + "MaxPodsConstraint", + "DatabaseEncryption", + "ResourceUsageExportConfig", +) diff --git a/google/cloud/container_v1beta1/types/cluster_service.py b/google/cloud/container_v1beta1/types/cluster_service.py new file mode 100644 index 00000000..5f7d60e8 --- /dev/null +++ b/google/cloud/container_v1beta1/types/cluster_service.py @@ -0,0 +1,3653 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.container.v1beta1", + manifest={ + "NodeConfig", + "ShieldedInstanceConfig", + "NodeTaint", + "MasterAuth", + "ClientCertificateConfig", + "AddonsConfig", + "HttpLoadBalancing", + "HorizontalPodAutoscaling", + "KubernetesDashboard", + "NetworkPolicyConfig", + "PrivateClusterConfig", + "IstioConfig", + "CloudRunConfig", + "MasterAuthorizedNetworksConfig", + "LegacyAbac", + "NetworkPolicy", + "IPAllocationPolicy", + "BinaryAuthorization", + "PodSecurityPolicyConfig", + "AuthenticatorGroupsConfig", + "Cluster", + "ClusterUpdate", + "Operation", + "OperationProgress", + "CreateClusterRequest", + "GetClusterRequest", + "UpdateClusterRequest", + "UpdateNodePoolRequest", + "SetNodePoolAutoscalingRequest", + "SetLoggingServiceRequest", + "SetMonitoringServiceRequest", + "SetAddonsConfigRequest", + "SetLocationsRequest", + "UpdateMasterRequest", + "SetMasterAuthRequest", + "DeleteClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "GetOperationRequest", + "ListOperationsRequest", + "CancelOperationRequest", + "ListOperationsResponse", + "GetServerConfigRequest", + "ServerConfig", + "CreateNodePoolRequest", + "DeleteNodePoolRequest", + "ListNodePoolsRequest", + "GetNodePoolRequest", + "NodePool", + "NodeManagement", + "AutoUpgradeOptions", + "MaintenancePolicy", + "MaintenanceWindow", + "TimeWindow", + "RecurringTimeWindow", + "DailyMaintenanceWindow", + "SetNodePoolManagementRequest", + "SetNodePoolSizeRequest", + "RollbackNodePoolUpgradeRequest", + "ListNodePoolsResponse", + "ClusterAutoscaling", + "AutoprovisioningNodePoolDefaults", + "ResourceLimit", + "NodePoolAutoscaling", + "SetLabelsRequest", + "SetLegacyAbacRequest", + "StartIPRotationRequest", + "CompleteIPRotationRequest", + "AcceleratorConfig", + "WorkloadMetadataConfig", + "SetNetworkPolicyRequest", + "SetMaintenancePolicyRequest", + 
"ListLocationsRequest", + "ListLocationsResponse", + "Location", + "StatusCondition", + "NetworkConfig", + "ListUsableSubnetworksRequest", + "ListUsableSubnetworksResponse", + "UsableSubnetworkSecondaryRange", + "UsableSubnetwork", + "VerticalPodAutoscaling", + "IntraNodeVisibilityConfig", + "MaxPodsConstraint", + "DatabaseEncryption", + "ResourceUsageExportConfig", + }, +) + + +class NodeConfig(proto.Message): + r"""Parameters that describe the nodes in a cluster. + + Attributes: + machine_type (str): + The name of a Google Compute Engine `machine + type `__ + (e.g. ``n1-standard-1``). + + If unspecified, the default machine type is + ``n1-standard-1``. + disk_size_gb (int): + Size of the disk attached to each node, + specified in GB. The smallest allowed disk size + is 10GB. + If unspecified, the default disk size is 100GB. + oauth_scopes (Sequence[str]): + The set of Google API scopes to be made available on all of + the node VMs under the "default" service account. + + The following scopes are recommended, but not required, and + by default are not included: + + - ``https://www.googleapis.com/auth/compute`` is required + for mounting persistent storage on your nodes. + - ``https://www.googleapis.com/auth/devstorage.read_only`` + is required for communicating with **gcr.io** (the + `Google Container + Registry `__). + + If unspecified, no scopes are added, unless Cloud Logging or + Cloud Monitoring are enabled, in which case their required + scopes will be added. + service_account (str): + The Google Cloud Platform Service Account to + be used by the node VMs. If no Service Account + is specified, the "default" service account is + used. + metadata (Sequence[~.cluster_service.NodeConfig.MetadataEntry]): + The metadata key/value pairs assigned to instances in the + cluster. + + Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less + than 128 bytes in length. These are reflected as part of a + URL in the metadata server. 
Additionally, to avoid + ambiguity, keys must not conflict with any other metadata + keys for the project or be one of the reserved keys: + "cluster-location" "cluster-name" "cluster-uid" + "configure-sh" "containerd-configure-sh" "enable-oslogin" + "gci-ensure-gke-docker" "gci-metrics-enabled" + "gci-update-strategy" "instance-template" "kube-env" + "startup-script" "user-data" "disable-address-manager" + "windows-startup-script-ps1" "common-psm1" + "k8s-node-setup-psm1" "install-ssh-psm1" "user-profile-psm1" + "serial-port-logging-enable" Values are free-form strings, + and only have meaning as interpreted by the image running in + the instance. The only restriction placed on them is that + each value's size must be less than or equal to 32 KB. + + The total size of all keys and values must be less than 512 + KB. + image_type (str): + The image type to use for this node. Note + that for a given image type, the latest version + of it will be used. + labels (Sequence[~.cluster_service.NodeConfig.LabelsEntry]): + The map of Kubernetes labels (key/value + pairs) to be applied to each node. These will + added in addition to any default label(s) that + Kubernetes may apply to the node. + In case of conflict in label keys, the applied + set may differ depending on the Kubernetes + version -- it's best to assume the behavior is + undefined and conflicts should be avoided. + For more information, including usage and the + valid values, see: + https://kubernetes.io/docs/concepts/overview/working- + with-objects/labels/ + local_ssd_count (int): + The number of local SSD disks to be attached + to the node. + The limit for this value is dependent upon the + maximum number of disks available on a machine + per zone. See: + https://cloud.google.com/compute/docs/disks/local- + ssd for more information. + tags (Sequence[str]): + The list of instance tags applied to all + nodes. 
Tags are used to identify valid sources + or targets for network firewalls and are + specified by the client during cluster or node + pool creation. Each tag within the list must + comply with RFC1035. + preemptible (bool): + Whether the nodes are created as preemptible + VM instances. See: + https://cloud.google.com/compute/docs/instances/preemptible + for more inforamtion about preemptible VM + instances. + accelerators (Sequence[~.cluster_service.AcceleratorConfig]): + A list of hardware accelerators to be + attached to each node. See + https://cloud.google.com/compute/docs/gpus for + more information about support for GPUs. + disk_type (str): + Type of the disk attached to each node (e.g. + 'pd-standard' or 'pd-ssd') + If unspecified, the default disk type is 'pd- + standard' + min_cpu_platform (str): + Minimum CPU platform to be used by this instance. The + instance may be scheduled on the specified or newer CPU + platform. Applicable values are the friendly names of CPU + platforms, such as minCpuPlatform: "Intel Haswell" or + minCpuPlatform: "Intel Sandy Bridge". For more information, + read `how to specify min CPU + platform `__ + To unset the min cpu platform field pass "automatic" as + field value. + workload_metadata_config (~.cluster_service.WorkloadMetadataConfig): + The workload metadata configuration for this + node. + taints (Sequence[~.cluster_service.NodeTaint]): + List of kubernetes taints to be applied to + each node. + For more information, including usage and the + valid values, see: + https://kubernetes.io/docs/concepts/configuration/taint- + and-toleration/ + shielded_instance_config (~.cluster_service.ShieldedInstanceConfig): + Shielded Instance options. 
+ """ + + machine_type = proto.Field(proto.STRING, number=1) + + disk_size_gb = proto.Field(proto.INT32, number=2) + + oauth_scopes = proto.RepeatedField(proto.STRING, number=3) + + service_account = proto.Field(proto.STRING, number=9) + + metadata = proto.MapField(proto.STRING, proto.STRING, number=4) + + image_type = proto.Field(proto.STRING, number=5) + + labels = proto.MapField(proto.STRING, proto.STRING, number=6) + + local_ssd_count = proto.Field(proto.INT32, number=7) + + tags = proto.RepeatedField(proto.STRING, number=8) + + preemptible = proto.Field(proto.BOOL, number=10) + + accelerators = proto.RepeatedField( + proto.MESSAGE, number=11, message="AcceleratorConfig", + ) + + disk_type = proto.Field(proto.STRING, number=12) + + min_cpu_platform = proto.Field(proto.STRING, number=13) + + workload_metadata_config = proto.Field( + proto.MESSAGE, number=14, message="WorkloadMetadataConfig", + ) + + taints = proto.RepeatedField(proto.MESSAGE, number=15, message="NodeTaint",) + + shielded_instance_config = proto.Field( + proto.MESSAGE, number=20, message="ShieldedInstanceConfig", + ) + + +class ShieldedInstanceConfig(proto.Message): + r"""A set of Shielded Instance options. + + Attributes: + enable_secure_boot (bool): + Defines whether the instance has Secure Boot + enabled. + Secure Boot helps ensure that the system only + runs authentic software by verifying the digital + signature of all boot components, and halting + the boot process if signature verification + fails. + enable_integrity_monitoring (bool): + Defines whether the instance has integrity + monitoring enabled. + Enables monitoring and attestation of the boot + integrity of the instance. The attestation is + performed against the integrity policy baseline. + This baseline is initially derived from the + implicitly trusted boot image when the instance + is created. 
+ """ + + enable_secure_boot = proto.Field(proto.BOOL, number=1) + + enable_integrity_monitoring = proto.Field(proto.BOOL, number=2) + + +class NodeTaint(proto.Message): + r"""Kubernetes taint is comprised of three fields: key, value, + and effect. Effect can only be one of three types: NoSchedule, + PreferNoSchedule or NoExecute. + For more information, including usage and the valid values, see: + https://kubernetes.io/docs/concepts/configuration/taint-and- + toleration/ + + Attributes: + key (str): + Key for taint. + value (str): + Value for taint. + effect (~.cluster_service.NodeTaint.Effect): + Effect for taint. + """ + + class Effect(proto.Enum): + r"""Possible values for Effect in taint.""" + EFFECT_UNSPECIFIED = 0 + NO_SCHEDULE = 1 + PREFER_NO_SCHEDULE = 2 + NO_EXECUTE = 3 + + key = proto.Field(proto.STRING, number=1) + + value = proto.Field(proto.STRING, number=2) + + effect = proto.Field(proto.ENUM, number=3, enum=Effect,) + + +class MasterAuth(proto.Message): + r"""The authentication information for accessing the master + endpoint. Authentication can be done using HTTP basic auth or + using client certificates. + + Attributes: + username (str): + The username to use for HTTP basic + authentication to the master endpoint. For + clusters v1.6.0 and later, basic authentication + can be disabled by leaving username unspecified + (or setting it to the empty string). + password (str): + The password to use for HTTP basic + authentication to the master endpoint. Because + the master endpoint is open to the Internet, you + should create a strong password. If a password + is provided for cluster creation, username must + be non-empty. + client_certificate_config (~.cluster_service.ClientCertificateConfig): + Configuration for client certificate + authentication on the cluster. For clusters + before v1.12, if no configuration is specified, + a client certificate is issued. 
+ cluster_ca_certificate (str): + [Output only] Base64-encoded public certificate that is the + root of trust for the cluster. + client_certificate (str): + [Output only] Base64-encoded public certificate used by + clients to authenticate to the cluster endpoint. + client_key (str): + [Output only] Base64-encoded private key used by clients to + authenticate to the cluster endpoint. + """ + + username = proto.Field(proto.STRING, number=1) + + password = proto.Field(proto.STRING, number=2) + + client_certificate_config = proto.Field( + proto.MESSAGE, number=3, message="ClientCertificateConfig", + ) + + cluster_ca_certificate = proto.Field(proto.STRING, number=100) + + client_certificate = proto.Field(proto.STRING, number=101) + + client_key = proto.Field(proto.STRING, number=102) + + +class ClientCertificateConfig(proto.Message): + r"""Configuration for client certificates on the cluster. + + Attributes: + issue_client_certificate (bool): + Issue a client certificate. + """ + + issue_client_certificate = proto.Field(proto.BOOL, number=1) + + +class AddonsConfig(proto.Message): + r"""Configuration for the addons that can be automatically spun + up in the cluster, enabling additional functionality. + + Attributes: + http_load_balancing (~.cluster_service.HttpLoadBalancing): + Configuration for the HTTP (L7) load + balancing controller addon, which makes it easy + to set up HTTP load balancers for services in a + cluster. + horizontal_pod_autoscaling (~.cluster_service.HorizontalPodAutoscaling): + Configuration for the horizontal pod + autoscaling feature, which increases or + decreases the number of replica pods a + replication controller has based on the resource + usage of the existing pods. + kubernetes_dashboard (~.cluster_service.KubernetesDashboard): + Configuration for the Kubernetes Dashboard. + This addon is deprecated, and will be disabled + in 1.15. 
It is recommended to use the Cloud + Console to manage and monitor your Kubernetes + clusters, workloads and applications. For more + information, see: + https://cloud.google.com/kubernetes- + engine/docs/concepts/dashboards + network_policy_config (~.cluster_service.NetworkPolicyConfig): + Configuration for NetworkPolicy. This only + tracks whether the addon is enabled or not on + the Master, it does not track whether network + policy is enabled for the nodes. + istio_config (~.cluster_service.IstioConfig): + Configuration for Istio, an open platform to + connect, manage, and secure microservices. + cloud_run_config (~.cluster_service.CloudRunConfig): + Configuration for the Cloud Run addon. The ``IstioConfig`` + addon must be enabled in order to enable Cloud Run addon. + This option can only be enabled at cluster creation time. + """ + + http_load_balancing = proto.Field( + proto.MESSAGE, number=1, message="HttpLoadBalancing", + ) + + horizontal_pod_autoscaling = proto.Field( + proto.MESSAGE, number=2, message="HorizontalPodAutoscaling", + ) + + kubernetes_dashboard = proto.Field( + proto.MESSAGE, number=3, message="KubernetesDashboard", + ) + + network_policy_config = proto.Field( + proto.MESSAGE, number=4, message="NetworkPolicyConfig", + ) + + istio_config = proto.Field(proto.MESSAGE, number=5, message="IstioConfig",) + + cloud_run_config = proto.Field(proto.MESSAGE, number=7, message="CloudRunConfig",) + + +class HttpLoadBalancing(proto.Message): + r"""Configuration options for the HTTP (L7) load balancing + controller addon, which makes it easy to set up HTTP load + balancers for services in a cluster. + + Attributes: + disabled (bool): + Whether the HTTP Load Balancing controller is + enabled in the cluster. When enabled, it runs a + small pod in the cluster that manages the load + balancers. 
+ """ + + disabled = proto.Field(proto.BOOL, number=1) + + +class HorizontalPodAutoscaling(proto.Message): + r"""Configuration options for the horizontal pod autoscaling + feature, which increases or decreases the number of replica pods + a replication controller has based on the resource usage of the + existing pods. + + Attributes: + disabled (bool): + Whether the Horizontal Pod Autoscaling + feature is enabled in the cluster. When enabled, + it ensures that a Heapster pod is running in the + cluster, which is also used by the Cloud + Monitoring service. + """ + + disabled = proto.Field(proto.BOOL, number=1) + + +class KubernetesDashboard(proto.Message): + r"""Configuration for the Kubernetes Dashboard. + + Attributes: + disabled (bool): + Whether the Kubernetes Dashboard is enabled + for this cluster. + """ + + disabled = proto.Field(proto.BOOL, number=1) + + +class NetworkPolicyConfig(proto.Message): + r"""Configuration for NetworkPolicy. This only tracks whether the + addon is enabled or not on the Master, it does not track whether + network policy is enabled for the nodes. + + Attributes: + disabled (bool): + Whether NetworkPolicy is enabled for this + cluster. + """ + + disabled = proto.Field(proto.BOOL, number=1) + + +class PrivateClusterConfig(proto.Message): + r"""Configuration options for private clusters. + + Attributes: + enable_private_nodes (bool): + Whether nodes have internal IP addresses + only. If enabled, all nodes are given only RFC + 1918 private addresses and communicate with the + master via private networking. + enable_private_endpoint (bool): + Whether the master's internal IP address is + used as the cluster endpoint. + master_ipv4_cidr_block (str): + The IP range in CIDR notation to use for the + hosted master network. This range will be used + for assigning internal IP addresses to the + master or set of masters, as well as the ILB + VIP. This range must not overlap with any other + ranges in use within the cluster's network. 
+ private_endpoint (str): + Output only. The internal IP address of this + cluster's master endpoint. + public_endpoint (str): + Output only. The external IP address of this + cluster's master endpoint. + """ + + enable_private_nodes = proto.Field(proto.BOOL, number=1) + + enable_private_endpoint = proto.Field(proto.BOOL, number=2) + + master_ipv4_cidr_block = proto.Field(proto.STRING, number=3) + + private_endpoint = proto.Field(proto.STRING, number=4) + + public_endpoint = proto.Field(proto.STRING, number=5) + + +class IstioConfig(proto.Message): + r"""Configuration options for Istio addon. + + Attributes: + disabled (bool): + Whether Istio is enabled for this cluster. + auth (~.cluster_service.IstioConfig.IstioAuthMode): + The specified Istio auth mode, either none, + or mutual TLS. + """ + + class IstioAuthMode(proto.Enum): + r"""Istio auth mode, + https://istio.io/docs/concepts/security/mutual-tls.html + """ + AUTH_NONE = 0 + AUTH_MUTUAL_TLS = 1 + + disabled = proto.Field(proto.BOOL, number=1) + + auth = proto.Field(proto.ENUM, number=2, enum=IstioAuthMode,) + + +class CloudRunConfig(proto.Message): + r"""Configuration options for the Cloud Run feature. + + Attributes: + disabled (bool): + Whether Cloud Run addon is enabled for this + cluster. + """ + + disabled = proto.Field(proto.BOOL, number=1) + + +class MasterAuthorizedNetworksConfig(proto.Message): + r"""Configuration options for the master authorized networks + feature. Enabled master authorized networks will disallow all + external traffic to access Kubernetes master through HTTPS + except traffic from the given CIDR blocks, Google Compute Engine + Public IPs and Google Prod IPs. + + Attributes: + enabled (bool): + Whether or not master authorized networks is + enabled. + cidr_blocks (Sequence[~.cluster_service.MasterAuthorizedNetworksConfig.CidrBlock]): + cidr_blocks define up to 10 external networks that could + access Kubernetes master through HTTPS. 
+ """ + + class CidrBlock(proto.Message): + r"""CidrBlock contains an optional name and one CIDR block. + + Attributes: + display_name (str): + display_name is an optional field for users to identify CIDR + blocks. + cidr_block (str): + cidr_block must be specified in CIDR notation. + """ + + display_name = proto.Field(proto.STRING, number=1) + + cidr_block = proto.Field(proto.STRING, number=2) + + enabled = proto.Field(proto.BOOL, number=1) + + cidr_blocks = proto.RepeatedField(proto.MESSAGE, number=2, message=CidrBlock,) + + +class LegacyAbac(proto.Message): + r"""Configuration for the legacy Attribute Based Access Control + authorization mode. + + Attributes: + enabled (bool): + Whether the ABAC authorizer is enabled for + this cluster. When enabled, identities in the + system, including service accounts, nodes, and + controllers, will have statically granted + permissions beyond those provided by the RBAC + configuration or IAM. + """ + + enabled = proto.Field(proto.BOOL, number=1) + + +class NetworkPolicy(proto.Message): + r"""Configuration options for the NetworkPolicy feature. + https://kubernetes.io/docs/concepts/services- + networking/networkpolicies/ + + Attributes: + provider (~.cluster_service.NetworkPolicy.Provider): + The selected network policy provider. + enabled (bool): + Whether network policy is enabled on the + cluster. + """ + + class Provider(proto.Enum): + r"""Allowed Network Policy providers.""" + PROVIDER_UNSPECIFIED = 0 + CALICO = 1 + + provider = proto.Field(proto.ENUM, number=1, enum=Provider,) + + enabled = proto.Field(proto.BOOL, number=2) + + +class IPAllocationPolicy(proto.Message): + r"""Configuration for controlling how IPs are allocated in the + cluster. + + Attributes: + use_ip_aliases (bool): + Whether alias IPs will be used for pod IPs in + the cluster. + create_subnetwork (bool): + Whether a new subnetwork will be created automatically for + the cluster. + + This field is only applicable when ``use_ip_aliases`` is + true. 
+ subnetwork_name (str): + A custom subnetwork name to be used if ``create_subnetwork`` + is true. If this field is empty, then an automatic name will + be chosen for the new subnetwork. + cluster_ipv4_cidr (str): + This field is deprecated, use cluster_ipv4_cidr_block. + node_ipv4_cidr (str): + This field is deprecated, use node_ipv4_cidr_block. + services_ipv4_cidr (str): + This field is deprecated, use services_ipv4_cidr_block. + cluster_secondary_range_name (str): + The name of the secondary range to be used for the cluster + CIDR block. The secondary range will be used for pod IP + addresses. This must be an existing secondary range + associated with the cluster subnetwork. + + This field is only applicable with use_ip_aliases and + create_subnetwork is false. + services_secondary_range_name (str): + The name of the secondary range to be used as for the + services CIDR block. The secondary range will be used for + service ClusterIPs. This must be an existing secondary range + associated with the cluster subnetwork. + + This field is only applicable with use_ip_aliases and + create_subnetwork is false. + cluster_ipv4_cidr_block (str): + The IP address range for the cluster pod IPs. If this field + is set, then ``cluster.cluster_ipv4_cidr`` must be left + blank. + + This field is only applicable when ``use_ip_aliases`` is + true. + + Set to blank to have a range chosen with the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + node_ipv4_cidr_block (str): + The IP address range of the instance IPs in this cluster. + + This is applicable only if ``create_subnetwork`` is true. + + Set to blank to have a range chosen with the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. 
+ + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + services_ipv4_cidr_block (str): + The IP address range of the services IPs in this cluster. If + blank, a range will be automatically chosen with the default + size. + + This field is only applicable when ``use_ip_aliases`` is + true. + + Set to blank to have a range chosen with the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + allow_route_overlap (bool): + If true, allow allocation of cluster CIDR ranges that + overlap with certain kinds of network routes. By default we + do not allow cluster CIDR ranges to intersect with any user + declared routes. With allow_route_overlap == true, we allow + overlapping with CIDR ranges that are larger than the + cluster CIDR range. + + If this field is set to true, then cluster and services + CIDRs must be fully-specified (e.g. ``10.96.0.0/14``, but + not ``/14``), which means: + + 1) When ``use_ip_aliases`` is true, + ``cluster_ipv4_cidr_block`` and + ``services_ipv4_cidr_block`` must be fully-specified. + 2) When ``use_ip_aliases`` is false, + ``cluster.cluster_ipv4_cidr`` muse be fully-specified. + tpu_ipv4_cidr_block (str): + The IP address range of the Cloud TPUs in this cluster. If + unspecified, a range will be automatically chosen with the + default size. + + This field is only applicable when ``use_ip_aliases`` is + true. + + If unspecified, the range will use the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. 
``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + """ + + use_ip_aliases = proto.Field(proto.BOOL, number=1) + + create_subnetwork = proto.Field(proto.BOOL, number=2) + + subnetwork_name = proto.Field(proto.STRING, number=3) + + cluster_ipv4_cidr = proto.Field(proto.STRING, number=4) + + node_ipv4_cidr = proto.Field(proto.STRING, number=5) + + services_ipv4_cidr = proto.Field(proto.STRING, number=6) + + cluster_secondary_range_name = proto.Field(proto.STRING, number=7) + + services_secondary_range_name = proto.Field(proto.STRING, number=8) + + cluster_ipv4_cidr_block = proto.Field(proto.STRING, number=9) + + node_ipv4_cidr_block = proto.Field(proto.STRING, number=10) + + services_ipv4_cidr_block = proto.Field(proto.STRING, number=11) + + allow_route_overlap = proto.Field(proto.BOOL, number=12) + + tpu_ipv4_cidr_block = proto.Field(proto.STRING, number=13) + + +class BinaryAuthorization(proto.Message): + r"""Configuration for Binary Authorization. + + Attributes: + enabled (bool): + Enable Binary Authorization for this cluster. + If enabled, all container images will be + validated by Google Binauthz. + """ + + enabled = proto.Field(proto.BOOL, number=1) + + +class PodSecurityPolicyConfig(proto.Message): + r"""Configuration for the PodSecurityPolicy feature. + + Attributes: + enabled (bool): + Enable the PodSecurityPolicy controller for + this cluster. If enabled, pods must be valid + under a PodSecurityPolicy to be created. + """ + + enabled = proto.Field(proto.BOOL, number=1) + + +class AuthenticatorGroupsConfig(proto.Message): + r"""Configuration for returning group information from + authenticators. + + Attributes: + enabled (bool): + Whether this cluster should return group + membership lookups during authentication using a + group of security groups. + security_group (str): + The name of the security group-of-groups to + be used. Only relevant if enabled = true. 
+ """ + + enabled = proto.Field(proto.BOOL, number=1) + + security_group = proto.Field(proto.STRING, number=2) + + +class Cluster(proto.Message): + r"""A Google Kubernetes Engine cluster. + + Attributes: + name (str): + The name of this cluster. The name must be unique within + this project and location (e.g. zone or region), and can be + up to 40 characters with the following restrictions: + + - Lowercase letters, numbers, and hyphens only. + - Must start with a letter. + - Must end with a number or a letter. + description (str): + An optional description of this cluster. + initial_node_count (int): + The number of nodes to create in this cluster. You must + ensure that your Compute Engine `resource + quota `__ is + sufficient for this number of instances. You must also have + available firewall and routes quota. For requests, this + field should only be used in lieu of a "node_pool" object, + since this configuration (along with the "node_config") will + be used to create a "NodePool" object with an auto-generated + name. Do not use this and a node_pool at the same time. + + This field is deprecated, use node_pool.initial_node_count + instead. + node_config (~.cluster_service.NodeConfig): + Parameters used in creating the cluster's nodes. For + requests, this field should only be used in lieu of a + "node_pool" object, since this configuration (along with the + "initial_node_count") will be used to create a "NodePool" + object with an auto-generated name. Do not use this and a + node_pool at the same time. For responses, this field will + be populated with the node configuration of the first node + pool. (For configuration of each node pool, see + ``node_pool.config``) + + If unspecified, the defaults are used. This field is + deprecated, use node_pool.config instead. + master_auth (~.cluster_service.MasterAuth): + The authentication information for accessing the master + endpoint. 
If unspecified, the defaults are used: For + clusters before v1.12, if master_auth is unspecified, + ``username`` will be set to "admin", a random password will + be generated, and a client certificate will be issued. + logging_service (str): + The logging service the cluster should use to write logs. + Currently available options: + + - ``logging.googleapis.com`` - the Google Cloud Logging + service. + - ``none`` - no logs will be exported from the cluster. + - if left as an empty string,\ ``logging.googleapis.com`` + will be used. + monitoring_service (str): + The monitoring service the cluster should use to write + metrics. Currently available options: + + - ``monitoring.googleapis.com`` - the Google Cloud + Monitoring service. + - ``none`` - no metrics will be exported from the cluster. + - if left as an empty string, ``monitoring.googleapis.com`` + will be used. + network (str): + The name of the Google Compute Engine + `network `__ + to which the cluster is connected. If left unspecified, the + ``default`` network will be used. On output this shows the + network ID instead of the name. + cluster_ipv4_cidr (str): + The IP address range of the container pods in this cluster, + in + `CIDR `__ + notation (e.g. ``10.96.0.0/14``). Leave blank to have one + automatically chosen or specify a ``/14`` block in + ``10.0.0.0/8``. + addons_config (~.cluster_service.AddonsConfig): + Configurations for the various addons + available to run in the cluster. + subnetwork (str): + The name of the Google Compute Engine + `subnetwork `__ + to which the cluster is connected. On output this shows the + subnetwork ID instead of the name. + node_pools (Sequence[~.cluster_service.NodePool]): + The node pools associated with this cluster. This field + should not be set if "node_config" or "initial_node_count" + are specified. + locations (Sequence[str]): + The list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. 
+ enable_kubernetes_alpha (bool): + Kubernetes alpha features are enabled on this + cluster. This includes alpha API groups (e.g. + v1beta1) and features that may not be production + ready in the kubernetes version of the master + and nodes. The cluster has no SLA for uptime and + master/node upgrades are disabled. Alpha enabled + clusters are automatically deleted thirty days + after creation. + resource_labels (Sequence[~.cluster_service.Cluster.ResourceLabelsEntry]): + The resource labels for the cluster to use to + annotate any related Google Compute Engine + resources. + label_fingerprint (str): + The fingerprint of the set of labels for this + cluster. + legacy_abac (~.cluster_service.LegacyAbac): + Configuration for the legacy ABAC + authorization mode. + network_policy (~.cluster_service.NetworkPolicy): + Configuration options for the NetworkPolicy + feature. + ip_allocation_policy (~.cluster_service.IPAllocationPolicy): + Configuration for cluster IP allocation. + master_authorized_networks_config (~.cluster_service.MasterAuthorizedNetworksConfig): + The configuration options for master + authorized networks feature. + maintenance_policy (~.cluster_service.MaintenancePolicy): + Configure the maintenance policy for this + cluster. + binary_authorization (~.cluster_service.BinaryAuthorization): + Configuration for Binary Authorization. + pod_security_policy_config (~.cluster_service.PodSecurityPolicyConfig): + Configuration for the PodSecurityPolicy + feature. + autoscaling (~.cluster_service.ClusterAutoscaling): + Cluster-level autoscaling configuration. + network_config (~.cluster_service.NetworkConfig): + Configuration for cluster networking. + private_cluster (bool): + If this is a private cluster setup. Private clusters are + clusters that, by default have no external IP addresses on + the nodes and where nodes and the master communicate over + private IP addresses. This field is deprecated, use + private_cluster_config.enable_private_nodes instead. 
+ master_ipv4_cidr_block (str): + The IP prefix in CIDR notation to use for the hosted master + network. This prefix will be used for assigning private IP + addresses to the master or set of masters, as well as the + ILB VIP. This field is deprecated, use + private_cluster_config.master_ipv4_cidr_block instead. + default_max_pods_constraint (~.cluster_service.MaxPodsConstraint): + The default constraint on the maximum number + of pods that can be run simultaneously on a node + in the node pool of this cluster. Only honored + if cluster created with IP Alias support. + resource_usage_export_config (~.cluster_service.ResourceUsageExportConfig): + Configuration for exporting resource usages. + Resource usage export is disabled when this + config unspecified. + authenticator_groups_config (~.cluster_service.AuthenticatorGroupsConfig): + Configuration controlling RBAC group + membership information. + private_cluster_config (~.cluster_service.PrivateClusterConfig): + Configuration for private cluster. + vertical_pod_autoscaling (~.cluster_service.VerticalPodAutoscaling): + Cluster-level Vertical Pod Autoscaling + configuration. + self_link (str): + [Output only] Server-defined URL for the resource. + zone (str): + [Output only] The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field is deprecated, use + location instead. + endpoint (str): + [Output only] The IP address of this cluster's master + endpoint. The endpoint can be accessed from the internet at + ``https://username:password@endpoint/``. + + See the ``masterAuth`` property of this resource for + username and password information. + initial_cluster_version (str): + The initial Kubernetes version for this + cluster. Valid versions are those found in + validMasterVersions returned by getServerConfig. + The version can be upgraded over time; such + upgrades are reflected in currentMasterVersion + and currentNodeVersion. 
+ + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "","-": picks the default + Kubernetes version + current_master_version (str): + [Output only] The current software version of the master + endpoint. + current_node_version (str): + [Output only] Deprecated, use + `NodePool.version `__ + instead. The current version of the node software + components. If they are currently at multiple versions + because they're in the process of being upgraded, this + reflects the minimum version of all nodes. + create_time (str): + [Output only] The time the cluster was created, in + `RFC3339 `__ text + format. + status (~.cluster_service.Cluster.Status): + [Output only] The current status of this cluster. + status_message (str): + [Output only] Additional information about the current + status of this cluster, if available. + node_ipv4_cidr_size (int): + [Output only] The size of the address space on each node for + hosting containers. This is provisioned from within the + ``container_ipv4_cidr`` range. This field will only be set + when cluster is in route-based network mode. + services_ipv4_cidr (str): + [Output only] The IP address range of the Kubernetes + services in this cluster, in + `CIDR `__ + notation (e.g. ``1.2.3.4/29``). Service addresses are + typically put in the last ``/16`` from the container CIDR. + instance_group_urls (Sequence[str]): + Deprecated. Use node_pools.instance_group_urls. + current_node_count (int): + [Output only] The number of nodes currently in the cluster. + Deprecated. Call Kubernetes API directly to retrieve node + information. 
+ expire_time (str): + [Output only] The time the cluster will be automatically + deleted in + `RFC3339 `__ text + format. + location (str): + [Output only] The name of the Google Compute Engine + `zone `__ + or + `region `__ + in which the cluster resides. + enable_tpu (bool): + Enable the ability to use Cloud TPUs in this + cluster. + tpu_ipv4_cidr_block (str): + [Output only] The IP address range of the Cloud TPUs in this + cluster, in + `CIDR `__ + notation (e.g. ``1.2.3.4/29``). + database_encryption (~.cluster_service.DatabaseEncryption): + Configuration of etcd encryption. + conditions (Sequence[~.cluster_service.StatusCondition]): + Which conditions caused the current cluster + state. + """ + + class Status(proto.Enum): + r"""The current status of the cluster.""" + STATUS_UNSPECIFIED = 0 + PROVISIONING = 1 + RUNNING = 2 + RECONCILING = 3 + STOPPING = 4 + ERROR = 5 + DEGRADED = 6 + + name = proto.Field(proto.STRING, number=1) + + description = proto.Field(proto.STRING, number=2) + + initial_node_count = proto.Field(proto.INT32, number=3) + + node_config = proto.Field(proto.MESSAGE, number=4, message=NodeConfig,) + + master_auth = proto.Field(proto.MESSAGE, number=5, message=MasterAuth,) + + logging_service = proto.Field(proto.STRING, number=6) + + monitoring_service = proto.Field(proto.STRING, number=7) + + network = proto.Field(proto.STRING, number=8) + + cluster_ipv4_cidr = proto.Field(proto.STRING, number=9) + + addons_config = proto.Field(proto.MESSAGE, number=10, message=AddonsConfig,) + + subnetwork = proto.Field(proto.STRING, number=11) + + node_pools = proto.RepeatedField(proto.MESSAGE, number=12, message="NodePool",) + + locations = proto.RepeatedField(proto.STRING, number=13) + + enable_kubernetes_alpha = proto.Field(proto.BOOL, number=14) + + resource_labels = proto.MapField(proto.STRING, proto.STRING, number=15) + + label_fingerprint = proto.Field(proto.STRING, number=16) + + legacy_abac = proto.Field(proto.MESSAGE, number=18, 
message=LegacyAbac,) + + network_policy = proto.Field(proto.MESSAGE, number=19, message=NetworkPolicy,) + + ip_allocation_policy = proto.Field( + proto.MESSAGE, number=20, message=IPAllocationPolicy, + ) + + master_authorized_networks_config = proto.Field( + proto.MESSAGE, number=22, message=MasterAuthorizedNetworksConfig, + ) + + maintenance_policy = proto.Field( + proto.MESSAGE, number=23, message="MaintenancePolicy", + ) + + binary_authorization = proto.Field( + proto.MESSAGE, number=24, message=BinaryAuthorization, + ) + + pod_security_policy_config = proto.Field( + proto.MESSAGE, number=25, message=PodSecurityPolicyConfig, + ) + + autoscaling = proto.Field(proto.MESSAGE, number=26, message="ClusterAutoscaling",) + + network_config = proto.Field(proto.MESSAGE, number=27, message="NetworkConfig",) + + private_cluster = proto.Field(proto.BOOL, number=28) + + master_ipv4_cidr_block = proto.Field(proto.STRING, number=29) + + default_max_pods_constraint = proto.Field( + proto.MESSAGE, number=30, message="MaxPodsConstraint", + ) + + resource_usage_export_config = proto.Field( + proto.MESSAGE, number=33, message="ResourceUsageExportConfig", + ) + + authenticator_groups_config = proto.Field( + proto.MESSAGE, number=34, message=AuthenticatorGroupsConfig, + ) + + private_cluster_config = proto.Field( + proto.MESSAGE, number=37, message=PrivateClusterConfig, + ) + + vertical_pod_autoscaling = proto.Field( + proto.MESSAGE, number=39, message="VerticalPodAutoscaling", + ) + + self_link = proto.Field(proto.STRING, number=100) + + zone = proto.Field(proto.STRING, number=101) + + endpoint = proto.Field(proto.STRING, number=102) + + initial_cluster_version = proto.Field(proto.STRING, number=103) + + current_master_version = proto.Field(proto.STRING, number=104) + + current_node_version = proto.Field(proto.STRING, number=105) + + create_time = proto.Field(proto.STRING, number=106) + + status = proto.Field(proto.ENUM, number=107, enum=Status,) + + status_message = 
proto.Field(proto.STRING, number=108) + + node_ipv4_cidr_size = proto.Field(proto.INT32, number=109) + + services_ipv4_cidr = proto.Field(proto.STRING, number=110) + + instance_group_urls = proto.RepeatedField(proto.STRING, number=111) + + current_node_count = proto.Field(proto.INT32, number=112) + + expire_time = proto.Field(proto.STRING, number=113) + + location = proto.Field(proto.STRING, number=114) + + enable_tpu = proto.Field(proto.BOOL, number=115) + + tpu_ipv4_cidr_block = proto.Field(proto.STRING, number=116) + + database_encryption = proto.Field( + proto.MESSAGE, number=38, message="DatabaseEncryption", + ) + + conditions = proto.RepeatedField( + proto.MESSAGE, number=118, message="StatusCondition", + ) + + +class ClusterUpdate(proto.Message): + r"""ClusterUpdate describes an update to the cluster. Exactly one + update can be applied to a cluster with each request, so at most + one field can be provided. + + Attributes: + desired_node_version (str): + The Kubernetes version to change the nodes to + (typically an upgrade). + + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the Kubernetes + master version + desired_monitoring_service (str): + The monitoring service the cluster should use to write + metrics. 
Currently available options: + + - "monitoring.googleapis.com/kubernetes" - the Google Cloud + Monitoring service with Kubernetes-native resource model + - "monitoring.googleapis.com" - the Google Cloud Monitoring + service + - "none" - no metrics will be exported from the cluster + desired_addons_config (~.cluster_service.AddonsConfig): + Configurations for the various addons + available to run in the cluster. + desired_node_pool_id (str): + The node pool to be upgraded. This field is mandatory if + "desired_node_version", "desired_image_family", + "desired_node_pool_autoscaling", or + "desired_workload_metadata_config" is specified and there is + more than one node pool on the cluster. + desired_image_type (str): + The desired image type for the node pool. NOTE: Set the + "desired_node_pool" field as well. + desired_node_pool_autoscaling (~.cluster_service.NodePoolAutoscaling): + Autoscaler configuration for the node pool specified in + desired_node_pool_id. If there is only one pool in the + cluster and desired_node_pool_id is not provided then the + change applies to that single node pool. + desired_locations (Sequence[str]): + The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. Changing the + locations a cluster is in will result in nodes being either + created or removed from the cluster, depending on whether + locations are being added or removed. + + This list must always include the cluster's primary zone. + desired_master_authorized_networks_config (~.cluster_service.MasterAuthorizedNetworksConfig): + The desired configuration options for master + authorized networks feature. + desired_pod_security_policy_config (~.cluster_service.PodSecurityPolicyConfig): + The desired configuration options for the + PodSecurityPolicy feature. + desired_cluster_autoscaling (~.cluster_service.ClusterAutoscaling): + Cluster-level autoscaling configuration. 
+ desired_binary_authorization (~.cluster_service.BinaryAuthorization): + The desired configuration options for the + Binary Authorization feature. + desired_logging_service (str): + The logging service the cluster should use to write logs. + Currently available options: + + - "logging.googleapis.com/kubernetes" - the Google Cloud + Logging service with Kubernetes-native resource model + - "logging.googleapis.com" - the Google Cloud Logging + service + - "none" - no logs will be exported from the cluster + desired_resource_usage_export_config (~.cluster_service.ResourceUsageExportConfig): + The desired configuration for exporting + resource usage. + desired_vertical_pod_autoscaling (~.cluster_service.VerticalPodAutoscaling): + Cluster-level Vertical Pod Autoscaling + configuration. + desired_intra_node_visibility_config (~.cluster_service.IntraNodeVisibilityConfig): + The desired config of Intra-node visibility. + desired_master_version (str): + The Kubernetes version to change the master + to. The only valid value is the latest supported + version. 
+ Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the default + Kubernetes version + """ + + desired_node_version = proto.Field(proto.STRING, number=4) + + desired_monitoring_service = proto.Field(proto.STRING, number=5) + + desired_addons_config = proto.Field(proto.MESSAGE, number=6, message=AddonsConfig,) + + desired_node_pool_id = proto.Field(proto.STRING, number=7) + + desired_image_type = proto.Field(proto.STRING, number=8) + + desired_node_pool_autoscaling = proto.Field( + proto.MESSAGE, number=9, message="NodePoolAutoscaling", + ) + + desired_locations = proto.RepeatedField(proto.STRING, number=10) + + desired_master_authorized_networks_config = proto.Field( + proto.MESSAGE, number=12, message=MasterAuthorizedNetworksConfig, + ) + + desired_pod_security_policy_config = proto.Field( + proto.MESSAGE, number=14, message=PodSecurityPolicyConfig, + ) + + desired_cluster_autoscaling = proto.Field( + proto.MESSAGE, number=15, message="ClusterAutoscaling", + ) + + desired_binary_authorization = proto.Field( + proto.MESSAGE, number=16, message=BinaryAuthorization, + ) + + desired_logging_service = proto.Field(proto.STRING, number=19) + + desired_resource_usage_export_config = proto.Field( + proto.MESSAGE, number=21, message="ResourceUsageExportConfig", + ) + + desired_vertical_pod_autoscaling = proto.Field( + proto.MESSAGE, number=22, message="VerticalPodAutoscaling", + ) + + desired_intra_node_visibility_config = proto.Field( + proto.MESSAGE, number=26, message="IntraNodeVisibilityConfig", + ) + + desired_master_version = proto.Field(proto.STRING, number=100) + + +class Operation(proto.Message): + r"""This operation 
resource represents operations that may have + happened or are happening on the cluster. All fields are output + only. + + Attributes: + name (str): + The server-assigned ID for the operation. + zone (str): + The name of the Google Compute Engine + `zone `__ + in which the operation is taking place. This field is + deprecated, use location instead. + operation_type (~.cluster_service.Operation.Type): + The operation type. + status (~.cluster_service.Operation.Status): + The current status of the operation. + detail (str): + Detailed operation progress, if available. + status_message (str): + If an error has occurred, a textual + description of the error. + self_link (str): + Server-defined URL for the resource. + target_link (str): + Server-defined URL for the target of the + operation. + location (str): + [Output only] The name of the Google Compute Engine + `zone `__ + or + `region `__ + in which the cluster resides. + start_time (str): + [Output only] The time the operation started, in + `RFC3339 `__ text + format. + end_time (str): + [Output only] The time the operation completed, in + `RFC3339 `__ text + format. + progress (~.cluster_service.OperationProgress): + [Output only] Progress information for an operation. + cluster_conditions (Sequence[~.cluster_service.StatusCondition]): + Which conditions caused the current cluster + state. + nodepool_conditions (Sequence[~.cluster_service.StatusCondition]): + Which conditions caused the current node pool + state. 
+ """ + + class Status(proto.Enum): + r"""Current status of the operation.""" + STATUS_UNSPECIFIED = 0 + PENDING = 1 + RUNNING = 2 + DONE = 3 + ABORTING = 4 + + class Type(proto.Enum): + r"""Operation type.""" + TYPE_UNSPECIFIED = 0 + CREATE_CLUSTER = 1 + DELETE_CLUSTER = 2 + UPGRADE_MASTER = 3 + UPGRADE_NODES = 4 + REPAIR_CLUSTER = 5 + UPDATE_CLUSTER = 6 + CREATE_NODE_POOL = 7 + DELETE_NODE_POOL = 8 + SET_NODE_POOL_MANAGEMENT = 9 + AUTO_REPAIR_NODES = 10 + AUTO_UPGRADE_NODES = 11 + SET_LABELS = 12 + SET_MASTER_AUTH = 13 + SET_NODE_POOL_SIZE = 14 + SET_NETWORK_POLICY = 15 + SET_MAINTENANCE_POLICY = 16 + + name = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + operation_type = proto.Field(proto.ENUM, number=3, enum=Type,) + + status = proto.Field(proto.ENUM, number=4, enum=Status,) + + detail = proto.Field(proto.STRING, number=8) + + status_message = proto.Field(proto.STRING, number=5) + + self_link = proto.Field(proto.STRING, number=6) + + target_link = proto.Field(proto.STRING, number=7) + + location = proto.Field(proto.STRING, number=9) + + start_time = proto.Field(proto.STRING, number=10) + + end_time = proto.Field(proto.STRING, number=11) + + progress = proto.Field(proto.MESSAGE, number=12, message="OperationProgress",) + + cluster_conditions = proto.RepeatedField( + proto.MESSAGE, number=13, message="StatusCondition", + ) + + nodepool_conditions = proto.RepeatedField( + proto.MESSAGE, number=14, message="StatusCondition", + ) + + +class OperationProgress(proto.Message): + r"""Information about operation (or operation stage) progress. + + Attributes: + name (str): + A non-parameterized string describing an + operation stage. Unset for single-stage + operations. + status (~.cluster_service.Operation.Status): + Status of an operation stage. + Unset for single-stage operations. 
+ metrics (Sequence[~.cluster_service.OperationProgress.Metric]): + Progress metric bundle, for example: metrics: [{name: "nodes + done", int_value: 15}, {name: "nodes total", int_value: 32}] + or metrics: [{name: "progress", double_value: 0.56}, {name: + "progress scale", double_value: 1.0}] + stages (Sequence[~.cluster_service.OperationProgress]): + Substages of an operation or a stage. + """ + + class Metric(proto.Message): + r"""Progress metric is (string, int|float|string) pair. + + Attributes: + name (str): + Metric name, required. + e.g., "nodes total", "percent done". + int_value (int): + For metrics with integer value. + double_value (float): + For metrics with floating point value. + string_value (str): + For metrics with custom values (ratios, + visual progress, etc.). + """ + + name = proto.Field(proto.STRING, number=1) + + int_value = proto.Field(proto.INT64, number=2) + + double_value = proto.Field(proto.DOUBLE, number=3) + + string_value = proto.Field(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=1) + + status = proto.Field(proto.ENUM, number=2, enum=Operation.Status,) + + metrics = proto.RepeatedField(proto.MESSAGE, number=3, message=Metric,) + + stages = proto.RepeatedField(proto.MESSAGE, number=4, message="OperationProgress",) + + +class CreateClusterRequest(proto.Message): + r"""CreateClusterRequest creates a cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the parent field. + cluster (~.cluster_service.Cluster): + Required. A `cluster + resource `__ + parent (str): + The parent (project and location) where the cluster will be + created. Specified in the format ``projects/*/locations/*``. 
+ """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster = proto.Field(proto.MESSAGE, number=3, message=Cluster,) + + parent = proto.Field(proto.STRING, number=5) + + +class GetClusterRequest(proto.Message): + r"""GetClusterRequest gets the settings of a cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to retrieve. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster) of the cluster to + retrieve. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + name = proto.Field(proto.STRING, number=5) + + +class UpdateClusterRequest(proto.Message): + r"""UpdateClusterRequest updates the settings of a cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + update (~.cluster_service.ClusterUpdate): + Required. A description of the update. 
+ name (str): + The name (project, location, cluster) of the cluster to + update. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + update = proto.Field(proto.MESSAGE, number=4, message=ClusterUpdate,) + + name = proto.Field(proto.STRING, number=5) + + +class UpdateNodePoolRequest(proto.Message): + r"""UpdateNodePoolRequest updates the version of a node pool. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Required. Deprecated. The name of the node + pool to upgrade. This field has been deprecated + and replaced by the name field. + node_version (str): + Required. The Kubernetes version to change + the nodes to (typically an upgrade). + + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the Kubernetes + master version + image_type (str): + Required. The desired image type for the node + pool. + workload_metadata_config (~.cluster_service.WorkloadMetadataConfig): + The desired workload metadata config for the node pool. 
+ name (str): + The name (project, location, cluster, node pool) of the node + pool to update. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + node_version = proto.Field(proto.STRING, number=5) + + image_type = proto.Field(proto.STRING, number=6) + + workload_metadata_config = proto.Field( + proto.MESSAGE, number=14, message="WorkloadMetadataConfig", + ) + + name = proto.Field(proto.STRING, number=8) + + +class SetNodePoolAutoscalingRequest(proto.Message): + r"""SetNodePoolAutoscalingRequest sets the autoscaler settings of + a node pool. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Required. Deprecated. The name of the node + pool to upgrade. This field has been deprecated + and replaced by the name field. + autoscaling (~.cluster_service.NodePoolAutoscaling): + Required. Autoscaling configuration for the + node pool. + name (str): + The name (project, location, cluster, node pool) of the node + pool to set autoscaler settings. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. 
+ """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + autoscaling = proto.Field(proto.MESSAGE, number=5, message="NodePoolAutoscaling",) + + name = proto.Field(proto.STRING, number=6) + + +class SetLoggingServiceRequest(proto.Message): + r"""SetLoggingServiceRequest sets the logging service of a + cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + logging_service (str): + Required. The logging service the cluster should use to + write metrics. Currently available options: + + - "logging.googleapis.com" - the Google Cloud Logging + service + - "none" - no metrics will be exported from the cluster + name (str): + The name (project, location, cluster) of the cluster to set + logging. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + logging_service = proto.Field(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=5) + + +class SetMonitoringServiceRequest(proto.Message): + r"""SetMonitoringServiceRequest sets the monitoring service of a + cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. 
+ zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + monitoring_service (str): + Required. The monitoring service the cluster should use to + write metrics. Currently available options: + + - "monitoring.googleapis.com" - the Google Cloud Monitoring + service + - "none" - no metrics will be exported from the cluster + name (str): + The name (project, location, cluster) of the cluster to set + monitoring. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + monitoring_service = proto.Field(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=6) + + +class SetAddonsConfigRequest(proto.Message): + r"""SetAddonsRequest sets the addons associated with the cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + addons_config (~.cluster_service.AddonsConfig): + Required. The desired configurations for the + various addons available to run in the cluster. + name (str): + The name (project, location, cluster) of the cluster to set + addons. Specified in the format + ``projects/*/locations/*/clusters/*``. 
+ """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + addons_config = proto.Field(proto.MESSAGE, number=4, message=AddonsConfig,) + + name = proto.Field(proto.STRING, number=6) + + +class SetLocationsRequest(proto.Message): + r"""SetLocationsRequest sets the locations of the cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + locations (Sequence[str]): + Required. The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. Changing the + locations a cluster is in will result in nodes being either + created or removed from the cluster, depending on whether + locations are being added or removed. + + This list must always include the cluster's primary zone. + name (str): + The name (project, location, cluster) of the cluster to set + locations. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + locations = proto.RepeatedField(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=6) + + +class UpdateMasterRequest(proto.Message): + r"""UpdateMasterRequest updates the master of the cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. 
+ This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + master_version (str): + Required. The Kubernetes version to change + the master to. + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the default + Kubernetes version + name (str): + The name (project, location, cluster) of the cluster to + update. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + master_version = proto.Field(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=7) + + +class SetMasterAuthRequest(proto.Message): + r"""SetMasterAuthRequest updates the admin password of a cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. 
+ action (~.cluster_service.SetMasterAuthRequest.Action): + Required. The exact form of action to be + taken on the master auth. + update (~.cluster_service.MasterAuth): + Required. A description of the update. + name (str): + The name (project, location, cluster) of the cluster to set + auth. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + class Action(proto.Enum): + r"""Operation type: what type update to perform.""" + UNKNOWN = 0 + SET_PASSWORD = 1 + GENERATE_PASSWORD = 2 + SET_USERNAME = 3 + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + action = proto.Field(proto.ENUM, number=4, enum=Action,) + + update = proto.Field(proto.MESSAGE, number=5, message=MasterAuth,) + + name = proto.Field(proto.STRING, number=7) + + +class DeleteClusterRequest(proto.Message): + r"""DeleteClusterRequest deletes a cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to delete. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster) of the cluster to + delete. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + name = proto.Field(proto.STRING, number=4) + + +class ListClustersRequest(proto.Message): + r"""ListClustersRequest lists clusters. + + Attributes: + project_id (str): + Required. Deprecated. 
The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides, or "-" for all zones. This + field has been deprecated and replaced by the parent field. + parent (str): + The parent (project and location) where the clusters will be + listed. Specified in the format ``projects/*/locations/*``. + Location "-" matches all zones and all regions. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + parent = proto.Field(proto.STRING, number=4) + + +class ListClustersResponse(proto.Message): + r"""ListClustersResponse is the result of ListClustersRequest. + + Attributes: + clusters (Sequence[~.cluster_service.Cluster]): + A list of clusters in the project in the + specified zone, or across all ones. + missing_zones (Sequence[str]): + If any zones are listed here, the list of + clusters returned may be missing those zones. + """ + + clusters = proto.RepeatedField(proto.MESSAGE, number=1, message=Cluster,) + + missing_zones = proto.RepeatedField(proto.STRING, number=2) + + +class GetOperationRequest(proto.Message): + r"""GetOperationRequest gets a single operation. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + operation_id (str): + Required. Deprecated. The server-assigned ``name`` of the + operation. This field has been deprecated and replaced by + the name field. + name (str): + The name (project, location, operation id) of the operation + to get. 
Specified in the format + ``projects/*/locations/*/operations/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + operation_id = proto.Field(proto.STRING, number=3) + + name = proto.Field(proto.STRING, number=5) + + +class ListOperationsRequest(proto.Message): + r"""ListOperationsRequest lists operations. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for, or ``-`` for all zones. This field + has been deprecated and replaced by the parent field. + parent (str): + The parent (project and location) where the operations will + be listed. Specified in the format + ``projects/*/locations/*``. Location "-" matches all zones + and all regions. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + parent = proto.Field(proto.STRING, number=4) + + +class CancelOperationRequest(proto.Message): + r"""CancelOperationRequest cancels a single operation. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the operation resides. This field has been + deprecated and replaced by the name field. + operation_id (str): + Required. Deprecated. The server-assigned ``name`` of the + operation. This field has been deprecated and replaced by + the name field. + name (str): + The name (project, location, operation id) of the operation + to cancel. Specified in the format + ``projects/*/locations/*/operations/*``. 
+ """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + operation_id = proto.Field(proto.STRING, number=3) + + name = proto.Field(proto.STRING, number=4) + + +class ListOperationsResponse(proto.Message): + r"""ListOperationsResponse is the result of + ListOperationsRequest. + + Attributes: + operations (Sequence[~.cluster_service.Operation]): + A list of operations in the project in the + specified zone. + missing_zones (Sequence[str]): + If any zones are listed here, the list of + operations returned may be missing the + operations from those zones. + """ + + operations = proto.RepeatedField(proto.MESSAGE, number=1, message=Operation,) + + missing_zones = proto.RepeatedField(proto.STRING, number=2) + + +class GetServerConfigRequest(proto.Message): + r"""Gets the current Kubernetes Engine service configuration. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for. This field has been deprecated and + replaced by the name field. + name (str): + The name (project and location) of the server config to get, + specified in the format ``projects/*/locations/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + name = proto.Field(proto.STRING, number=4) + + +class ServerConfig(proto.Message): + r"""Kubernetes Engine service configuration. + + Attributes: + default_cluster_version (str): + Version of Kubernetes the service deploys by + default. + valid_node_versions (Sequence[str]): + List of valid node upgrade target versions. + default_image_type (str): + Default image type. + valid_image_types (Sequence[str]): + List of valid image types. 
+ valid_master_versions (Sequence[str]): + List of valid master versions. + """ + + default_cluster_version = proto.Field(proto.STRING, number=1) + + valid_node_versions = proto.RepeatedField(proto.STRING, number=3) + + default_image_type = proto.Field(proto.STRING, number=4) + + valid_image_types = proto.RepeatedField(proto.STRING, number=5) + + valid_master_versions = proto.RepeatedField(proto.STRING, number=6) + + +class CreateNodePoolRequest(proto.Message): + r"""CreateNodePoolRequest creates a node pool for a cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the parent field. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated and + replaced by the parent field. + node_pool (~.cluster_service.NodePool): + Required. The node pool to create. + parent (str): + The parent (project, location, cluster id) where the node + pool will be created. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool = proto.Field(proto.MESSAGE, number=4, message="NodePool",) + + parent = proto.Field(proto.STRING, number=6) + + +class DeleteNodePoolRequest(proto.Message): + r"""DeleteNodePoolRequest deletes a node pool for a cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. 
The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Required. Deprecated. The name of the node + pool to delete. This field has been deprecated + and replaced by the name field. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to delete. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=6) + + +class ListNodePoolsRequest(proto.Message): + r"""ListNodePoolsRequest lists the node pool(s) for a cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the parent field. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated and + replaced by the parent field. + parent (str): + The parent (project, location, cluster id) where the node + pools will be listed. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + parent = proto.Field(proto.STRING, number=5) + + +class GetNodePoolRequest(proto.Message): + r"""GetNodePoolRequest retrieves a node pool for a cluster. 
+ + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Required. Deprecated. The name of the node + pool. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to get. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=6) + + +class NodePool(proto.Message): + r"""NodePool contains the name and configuration for a cluster's + node pool. Node pools are a set of nodes (i.e. VM's), with a + common configuration and specification, under the control of the + cluster master. They may have a set of Kubernetes labels applied + to them, which may be used to reference them during pod + scheduling. They may also be resized up or down, to accommodate + the workload. + + Attributes: + name (str): + The name of the node pool. + config (~.cluster_service.NodeConfig): + The node configuration of the pool. + initial_node_count (int): + The initial node count for the pool. You must ensure that + your Compute Engine `resource + quota `__ is + sufficient for this number of instances. You must also have + available firewall and routes quota. + self_link (str): + [Output only] Server-defined URL for the resource. 
+ version (str): + The version of the Kubernetes of this node. + instance_group_urls (Sequence[str]): + [Output only] The resource URLs of the `managed instance + groups `__ + associated with this node pool. + status (~.cluster_service.NodePool.Status): + [Output only] The status of the nodes in this pool instance. + status_message (str): + [Output only] Additional information about the current + status of this node pool instance, if available. + autoscaling (~.cluster_service.NodePoolAutoscaling): + Autoscaler configuration for this NodePool. + Autoscaler is enabled only if a valid + configuration is present. + management (~.cluster_service.NodeManagement): + NodeManagement configuration for this + NodePool. + max_pods_constraint (~.cluster_service.MaxPodsConstraint): + The constraint on the maximum number of pods + that can be run simultaneously on a node in the + node pool. + conditions (Sequence[~.cluster_service.StatusCondition]): + Which conditions caused the current node pool + state. + pod_ipv4_cidr_size (int): + [Output only] The pod CIDR block size per node in this node + pool. 
+ """ + + class Status(proto.Enum): + r"""The current status of the node pool instance.""" + STATUS_UNSPECIFIED = 0 + PROVISIONING = 1 + RUNNING = 2 + RUNNING_WITH_ERROR = 3 + RECONCILING = 4 + STOPPING = 5 + ERROR = 6 + + name = proto.Field(proto.STRING, number=1) + + config = proto.Field(proto.MESSAGE, number=2, message=NodeConfig,) + + initial_node_count = proto.Field(proto.INT32, number=3) + + self_link = proto.Field(proto.STRING, number=100) + + version = proto.Field(proto.STRING, number=101) + + instance_group_urls = proto.RepeatedField(proto.STRING, number=102) + + status = proto.Field(proto.ENUM, number=103, enum=Status,) + + status_message = proto.Field(proto.STRING, number=104) + + autoscaling = proto.Field(proto.MESSAGE, number=4, message="NodePoolAutoscaling",) + + management = proto.Field(proto.MESSAGE, number=5, message="NodeManagement",) + + max_pods_constraint = proto.Field( + proto.MESSAGE, number=6, message="MaxPodsConstraint", + ) + + conditions = proto.RepeatedField( + proto.MESSAGE, number=105, message="StatusCondition", + ) + + pod_ipv4_cidr_size = proto.Field(proto.INT32, number=7) + + +class NodeManagement(proto.Message): + r"""NodeManagement defines the set of node management services + turned on for the node pool. + + Attributes: + auto_upgrade (bool): + Whether the nodes will be automatically + upgraded. + auto_repair (bool): + Whether the nodes will be automatically + repaired. + upgrade_options (~.cluster_service.AutoUpgradeOptions): + Specifies the Auto Upgrade knobs for the node + pool. + """ + + auto_upgrade = proto.Field(proto.BOOL, number=1) + + auto_repair = proto.Field(proto.BOOL, number=2) + + upgrade_options = proto.Field( + proto.MESSAGE, number=10, message="AutoUpgradeOptions", + ) + + +class AutoUpgradeOptions(proto.Message): + r"""AutoUpgradeOptions defines the set of options for the user to + control how the Auto Upgrades will proceed. 
+ + Attributes: + auto_upgrade_start_time (str): + [Output only] This field is set when upgrades are about to + commence with the approximate start time for the upgrades, + in `RFC3339 `__ text + format. + description (str): + [Output only] This field is set when upgrades are about to + commence with the description of the upgrade. + """ + + auto_upgrade_start_time = proto.Field(proto.STRING, number=1) + + description = proto.Field(proto.STRING, number=2) + + +class MaintenancePolicy(proto.Message): + r"""MaintenancePolicy defines the maintenance policy to be used + for the cluster. + + Attributes: + window (~.cluster_service.MaintenanceWindow): + Specifies the maintenance window in which + maintenance may be performed. + resource_version (str): + A hash identifying the version of this + policy, so that updates to fields of the policy + won't accidentally undo intermediate changes + (and so that users of the API unaware of some + fields won't accidentally remove other fields). + Make a get() request to the cluster + to get the current resource version and include + it with requests to set the policy. + """ + + window = proto.Field(proto.MESSAGE, number=1, message="MaintenanceWindow",) + + resource_version = proto.Field(proto.STRING, number=3) + + +class MaintenanceWindow(proto.Message): + r"""MaintenanceWindow defines the maintenance window to be used + for the cluster. + + Attributes: + daily_maintenance_window (~.cluster_service.DailyMaintenanceWindow): + DailyMaintenanceWindow specifies a daily + maintenance operation window. + recurring_window (~.cluster_service.RecurringTimeWindow): + RecurringWindow specifies some number of + recurring time periods for maintenance to occur. + The time windows may be overlapping. If no + maintenance windows are set, maintenance can + occur at any time. + maintenance_exclusions (Sequence[~.cluster_service.MaintenanceWindow.MaintenanceExclusionsEntry]): + Exceptions to maintenance window. 
Non- + mergency maintenance should not occur in these + windows. + """ + + daily_maintenance_window = proto.Field( + proto.MESSAGE, number=2, message="DailyMaintenanceWindow", + ) + + recurring_window = proto.Field( + proto.MESSAGE, number=3, message="RecurringTimeWindow", + ) + + maintenance_exclusions = proto.MapField( + proto.STRING, proto.MESSAGE, number=4, message="TimeWindow", + ) + + +class TimeWindow(proto.Message): + r"""Represents an arbitrary window of time. + + Attributes: + start_time (~.timestamp.Timestamp): + The time that the window first starts. + end_time (~.timestamp.Timestamp): + The time that the window ends. The end time + should take place after the start time. + """ + + start_time = proto.Field(proto.MESSAGE, number=1, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + +class RecurringTimeWindow(proto.Message): + r"""Represents an arbitrary window of time that recurs. + + Attributes: + window (~.cluster_service.TimeWindow): + The window of the first recurrence. + recurrence (str): + An RRULE + (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) + for how this window reccurs. They go on for the + span of time between the start and end time. + + For example, to have something repeat every + weekday, you'd use: + FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR To + repeat some window daily (equivalent to the + DailyMaintenanceWindow): + FREQ=DAILY + For the first weekend of every month: + FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU + This specifies how frequently the window starts. + Eg, if you wanted to have a 9-5 UTC-4 window + every weekday, you'd use something like: + start time = 2019-01-01T09:00:00-0400 + end time = 2019-01-01T17:00:00-0400 + recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR + + Windows can span multiple days. 
Eg, to make the + window encompass every weekend from midnight + Saturday till the last minute of Sunday UTC: + + start time = 2019-01-05T00:00:00Z + end time = 2019-01-07T23:59:00Z + recurrence = FREQ=WEEKLY;BYDAY=SA + + Note the start and end time's specific dates are + largely arbitrary except to specify duration of + the window and when it first starts. The FREQ + values of HOURLY, MINUTELY, and SECONDLY are not + supported. + """ + + window = proto.Field(proto.MESSAGE, number=1, message=TimeWindow,) + + recurrence = proto.Field(proto.STRING, number=2) + + +class DailyMaintenanceWindow(proto.Message): + r"""Time window specified for daily maintenance operations. + + Attributes: + start_time (str): + Time within the maintenance window to start the maintenance + operations. It must be in format "HH:MM", where HH : [00-23] + and MM : [00-59] GMT. + duration (str): + [Output only] Duration of the time window, automatically + chosen to be smallest possible in the given scenario. + """ + + start_time = proto.Field(proto.STRING, number=2) + + duration = proto.Field(proto.STRING, number=3) + + +class SetNodePoolManagementRequest(proto.Message): + r"""SetNodePoolManagementRequest sets the node management + properties of a node pool. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to update. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Required. Deprecated. The name of the node + pool to update. This field has been deprecated + and replaced by the name field. + management (~.cluster_service.NodeManagement): + Required. 
NodeManagement configuration for + the node pool. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to set management properties. Specified in the + format ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + management = proto.Field(proto.MESSAGE, number=5, message=NodeManagement,) + + name = proto.Field(proto.STRING, number=7) + + +class SetNodePoolSizeRequest(proto.Message): + r"""SetNodePoolSizeRequest sets the size a node + pool. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to update. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Required. Deprecated. The name of the node + pool to update. This field has been deprecated + and replaced by the name field. + node_count (int): + Required. The desired node count for the + pool. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to set size. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. 
+ """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + node_count = proto.Field(proto.INT32, number=5) + + name = proto.Field(proto.STRING, number=7) + + +class RollbackNodePoolUpgradeRequest(proto.Message): + r"""RollbackNodePoolUpgradeRequest rollbacks the previously + Aborted or Failed NodePool upgrade. This will be an no-op if the + last upgrade successfully completed. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to rollback. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Required. Deprecated. The name of the node + pool to rollback. This field has been deprecated + and replaced by the name field. + name (str): + The name (project, location, cluster, node pool id) of the + node poll to rollback upgrade. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + node_pool_id = proto.Field(proto.STRING, number=4) + + name = proto.Field(proto.STRING, number=6) + + +class ListNodePoolsResponse(proto.Message): + r"""ListNodePoolsResponse is the result of ListNodePoolsRequest. + + Attributes: + node_pools (Sequence[~.cluster_service.NodePool]): + A list of node pools for a cluster. 
+ """ + + node_pools = proto.RepeatedField(proto.MESSAGE, number=1, message=NodePool,) + + +class ClusterAutoscaling(proto.Message): + r"""ClusterAutoscaling contains global, per-cluster information + required by Cluster Autoscaler to automatically adjust the size + of the cluster and create/delete + node pools based on the current needs. + + Attributes: + enable_node_autoprovisioning (bool): + Enables automatic node pool creation and + deletion. + resource_limits (Sequence[~.cluster_service.ResourceLimit]): + Contains global constraints regarding minimum + and maximum amount of resources in the cluster. + autoprovisioning_node_pool_defaults (~.cluster_service.AutoprovisioningNodePoolDefaults): + AutoprovisioningNodePoolDefaults contains + defaults for a node pool created by NAP. + autoprovisioning_locations (Sequence[str]): + The list of Google Compute Engine + `zones `__ + in which the NodePool's nodes can be created by NAP. + """ + + enable_node_autoprovisioning = proto.Field(proto.BOOL, number=1) + + resource_limits = proto.RepeatedField( + proto.MESSAGE, number=2, message="ResourceLimit", + ) + + autoprovisioning_node_pool_defaults = proto.Field( + proto.MESSAGE, number=4, message="AutoprovisioningNodePoolDefaults", + ) + + autoprovisioning_locations = proto.RepeatedField(proto.STRING, number=5) + + +class AutoprovisioningNodePoolDefaults(proto.Message): + r"""AutoprovisioningNodePoolDefaults contains defaults for a node + pool created by NAP. + + Attributes: + oauth_scopes (Sequence[str]): + Scopes that are used by NAP when creating node pools. If + oauth_scopes are specified, service_account should be empty. + service_account (str): + The Google Cloud Platform Service Account to be used by the + node VMs. If service_account is specified, scopes should be + empty. 
+ """ + + oauth_scopes = proto.RepeatedField(proto.STRING, number=1) + + service_account = proto.Field(proto.STRING, number=2) + + +class ResourceLimit(proto.Message): + r"""Contains information about amount of some resource in the + cluster. For memory, value should be in GB. + + Attributes: + resource_type (str): + Resource name "cpu", "memory" or gpu-specific + string. + minimum (int): + Minimum amount of the resource in the + cluster. + maximum (int): + Maximum amount of the resource in the + cluster. + """ + + resource_type = proto.Field(proto.STRING, number=1) + + minimum = proto.Field(proto.INT64, number=2) + + maximum = proto.Field(proto.INT64, number=3) + + +class NodePoolAutoscaling(proto.Message): + r"""NodePoolAutoscaling contains information required by cluster + autoscaler to adjust the size of the node pool to the current + cluster usage. + + Attributes: + enabled (bool): + Is autoscaling enabled for this node pool. + min_node_count (int): + Minimum number of nodes in the NodePool. Must be >= 1 and <= + max_node_count. + max_node_count (int): + Maximum number of nodes in the NodePool. Must be >= + min_node_count. There has to enough quota to scale up the + cluster. + autoprovisioned (bool): + Can this node pool be deleted automatically. + """ + + enabled = proto.Field(proto.BOOL, number=1) + + min_node_count = proto.Field(proto.INT32, number=2) + + max_node_count = proto.Field(proto.INT32, number=3) + + autoprovisioned = proto.Field(proto.BOOL, number=4) + + +class SetLabelsRequest(proto.Message): + r"""SetLabelsRequest sets the Google Cloud Platform labels on a + Google Container Engine cluster, which will in turn set them for + Google Compute Engine resources used by that cluster + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. 
The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated and + replaced by the name field. + resource_labels (Sequence[~.cluster_service.SetLabelsRequest.ResourceLabelsEntry]): + Required. The labels to set for that cluster. + label_fingerprint (str): + Required. The fingerprint of the previous set + of labels for this resource, used to detect + conflicts. The fingerprint is initially + generated by Kubernetes Engine and changes after + every request to modify or update labels. You + must always provide an up-to-date fingerprint + hash when updating or changing labels. Make a + get() request to the resource to + get the latest fingerprint. + name (str): + The name (project, location, cluster id) of the cluster to + set labels. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + resource_labels = proto.MapField(proto.STRING, proto.STRING, number=4) + + label_fingerprint = proto.Field(proto.STRING, number=5) + + name = proto.Field(proto.STRING, number=7) + + +class SetLegacyAbacRequest(proto.Message): + r"""SetLegacyAbacRequest enables or disables the ABAC + authorization mechanism for a cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to update. 
This field has been deprecated and + replaced by the name field. + enabled (bool): + Required. Whether ABAC authorization will be + enabled in the cluster. + name (str): + The name (project, location, cluster id) of the cluster to + set legacy abac. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + enabled = proto.Field(proto.BOOL, number=4) + + name = proto.Field(proto.STRING, number=6) + + +class StartIPRotationRequest(proto.Message): + r"""StartIPRotationRequest creates a new IP for the cluster and + then performs a node upgrade on each node pool to point to the + new IP. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster id) of the cluster to + start IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + rotate_credentials (bool): + Whether to rotate credentials during IP + rotation. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + name = proto.Field(proto.STRING, number=6) + + rotate_credentials = proto.Field(proto.BOOL, number=7) + + +class CompleteIPRotationRequest(proto.Message): + r"""CompleteIPRotationRequest moves the cluster master back into + single-IP mode. + + Attributes: + project_id (str): + Required. Deprecated. 
The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster id) of the cluster to + complete IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + name = proto.Field(proto.STRING, number=7) + + +class AcceleratorConfig(proto.Message): + r"""AcceleratorConfig represents a Hardware Accelerator request. + + Attributes: + accelerator_count (int): + The number of the accelerator cards exposed + to an instance. + accelerator_type (str): + The accelerator type resource name. List of supported + accelerators + `here `__ + """ + + accelerator_count = proto.Field(proto.INT64, number=1) + + accelerator_type = proto.Field(proto.STRING, number=2) + + +class WorkloadMetadataConfig(proto.Message): + r"""WorkloadMetadataConfig defines the metadata configuration to + expose to workloads on the node pool. + + Attributes: + node_metadata (~.cluster_service.WorkloadMetadataConfig.NodeMetadata): + NodeMetadata is the configuration for how to + expose metadata to the workloads running on the + node. + """ + + class NodeMetadata(proto.Enum): + r"""NodeMetadata is the configuration for if and how to expose + the node metadata to the workload running on the node. 
+ """ + UNSPECIFIED = 0 + SECURE = 1 + EXPOSE = 2 + + node_metadata = proto.Field(proto.ENUM, number=1, enum=NodeMetadata,) + + +class SetNetworkPolicyRequest(proto.Message): + r"""SetNetworkPolicyRequest enables/disables network policy for a + cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated and + replaced by the name field. + network_policy (~.cluster_service.NetworkPolicy): + Required. Configuration options for the + NetworkPolicy feature. + name (str): + The name (project, location, cluster id) of the cluster to + set networking policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + network_policy = proto.Field(proto.MESSAGE, number=4, message=NetworkPolicy,) + + name = proto.Field(proto.STRING, number=6) + + +class SetMaintenancePolicyRequest(proto.Message): + r"""SetMaintenancePolicyRequest sets the maintenance policy for a + cluster. + + Attributes: + project_id (str): + Required. The Google Developers Console `project ID or + project + number `__. + zone (str): + Required. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. + cluster_id (str): + Required. The name of the cluster to update. + maintenance_policy (~.cluster_service.MaintenancePolicy): + Required. The maintenance policy to be set + for the cluster. An empty field clears the + existing maintenance policy. 
+ name (str): + The name (project, location, cluster id) of the cluster to + set maintenance policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field(proto.STRING, number=1) + + zone = proto.Field(proto.STRING, number=2) + + cluster_id = proto.Field(proto.STRING, number=3) + + maintenance_policy = proto.Field( + proto.MESSAGE, number=4, message=MaintenancePolicy, + ) + + name = proto.Field(proto.STRING, number=5) + + +class ListLocationsRequest(proto.Message): + r"""ListLocationsRequest is used to request the locations that + offer GKE. + + Attributes: + parent (str): + Required. Contains the name of the resource requested. + Specified in the format ``projects/*``. + """ + + parent = proto.Field(proto.STRING, number=1) + + +class ListLocationsResponse(proto.Message): + r"""ListLocationsResponse returns the list of all GKE locations + and their recommendation state. + + Attributes: + locations (Sequence[~.cluster_service.Location]): + A full list of GKE locations. + next_page_token (str): + Only return ListLocationsResponse that occur after the + page_token. This value should be populated from the + ListLocationsResponse.next_page_token if that response token + was set (which happens when listing more Locations than fit + in a single ListLocationsResponse). + """ + + @property + def raw_page(self): + return self + + locations = proto.RepeatedField(proto.MESSAGE, number=1, message="Location",) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class Location(proto.Message): + r"""Location returns the location name, and if the location is + recommended for GKE cluster scheduling. + + Attributes: + type (~.cluster_service.Location.LocationType): + Contains the type of location this Location + is for. Regional or Zonal. + name (str): + Contains the name of the resource requested. Specified in + the format ``projects/*/locations/*``. 
+        recommended (bool):
+            Whether the location is recommended for GKE
+            cluster scheduling.
+    """
+
+    class LocationType(proto.Enum):
+        r"""LocationType is the type of GKE location, regional or zonal."""
+        LOCATION_TYPE_UNSPECIFIED = 0
+        ZONE = 1
+        REGION = 2
+
+    type = proto.Field(proto.ENUM, number=1, enum=LocationType,)
+
+    name = proto.Field(proto.STRING, number=2)
+
+    recommended = proto.Field(proto.BOOL, number=3)
+
+
+class StatusCondition(proto.Message):
+    r"""StatusCondition describes why a cluster or a node pool has a
+    certain status (e.g., ERROR or DEGRADED).
+
+    Attributes:
+        code (~.cluster_service.StatusCondition.Code):
+            Machine-friendly representation of the
+            condition
+        message (str):
+            Human-friendly representation of the
+            condition
+    """
+
+    class Code(proto.Enum):
+        r"""Code for each condition"""
+        UNKNOWN = 0
+        GCE_STOCKOUT = 1
+        GKE_SERVICE_ACCOUNT_DELETED = 2
+        GCE_QUOTA_EXCEEDED = 3
+        SET_BY_OPERATOR = 4
+        CLOUD_KMS_KEY_ERROR = 7
+
+    code = proto.Field(proto.ENUM, number=1, enum=Code,)
+
+    message = proto.Field(proto.STRING, number=2)
+
+
+class NetworkConfig(proto.Message):
+    r"""NetworkConfig reports the relative names of network &
+    subnetwork.
+
+    Attributes:
+        network (str):
+            Output only. The relative name of the Google Compute Engine
+            [network]`google.container.v1beta1.NetworkConfig.network `__
+            to which the cluster is connected. Example:
+            projects/my-project/global/networks/my-network
+        subnetwork (str):
+            Output only. The relative name of the Google Compute Engine
+            `subnetwork `__
+            to which the cluster is connected. Example:
+            projects/my-project/regions/us-central1/subnetworks/my-subnet
+        enable_intra_node_visibility (bool):
+            Whether Intra-node visibility is enabled for
+            this cluster. This makes same node pod to pod
+            traffic visible for VPC network.
+ """ + + network = proto.Field(proto.STRING, number=1) + + subnetwork = proto.Field(proto.STRING, number=2) + + enable_intra_node_visibility = proto.Field(proto.BOOL, number=5) + + +class ListUsableSubnetworksRequest(proto.Message): + r"""ListUsableSubnetworksRequest requests the list of usable + subnetworks. available to a user for creating clusters. + + Attributes: + parent (str): + Required. The parent project where subnetworks are usable. + Specified in the format ``projects/*``. + filter (str): + Filtering currently only supports equality on the + networkProjectId and must be in the form: + "networkProjectId=[PROJECTID]", where ``networkProjectId`` + is the project which owns the listed subnetworks. This + defaults to the parent project ID. + page_size (int): + The max number of results per page that should be returned. + If the number of available results is larger than + ``page_size``, a ``next_page_token`` is returned which can + be used to get the next page of results in subsequent + requests. Acceptable values are 0 to 500, inclusive. + (Default: 500) + page_token (str): + Specifies a page token to use. Set this to + the nextPageToken returned by previous list + requests to get the next page of results. + """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + +class ListUsableSubnetworksResponse(proto.Message): + r"""ListUsableSubnetworksResponse is the response of + ListUsableSubnetworksRequest. + + Attributes: + subnetworks (Sequence[~.cluster_service.UsableSubnetwork]): + A list of usable subnetworks in the specified + network project. + next_page_token (str): + This token allows you to get the next page of results for + list requests. If the number of results is larger than + ``page_size``, use the ``next_page_token`` as a value for + the query parameter ``page_token`` in the next request. 
The + value will become empty when there are no more pages. + """ + + @property + def raw_page(self): + return self + + subnetworks = proto.RepeatedField( + proto.MESSAGE, number=1, message="UsableSubnetwork", + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UsableSubnetworkSecondaryRange(proto.Message): + r"""Secondary IP range of a usable subnetwork. + + Attributes: + range_name (str): + The name associated with this subnetwork + secondary range, used when adding an alias IP + range to a VM instance. + ip_cidr_range (str): + The range of IP addresses belonging to this + subnetwork secondary range. + status (~.cluster_service.UsableSubnetworkSecondaryRange.Status): + This field is to determine the status of the + secondary range programmably. + """ + + class Status(proto.Enum): + r"""Status shows the current usage of a secondary IP range.""" + UNKNOWN = 0 + UNUSED = 1 + IN_USE_SERVICE = 2 + IN_USE_SHAREABLE_POD = 3 + IN_USE_MANAGED_POD = 4 + + range_name = proto.Field(proto.STRING, number=1) + + ip_cidr_range = proto.Field(proto.STRING, number=2) + + status = proto.Field(proto.ENUM, number=3, enum=Status,) + + +class UsableSubnetwork(proto.Message): + r"""UsableSubnetwork resource returns the subnetwork name, its + associated network and the primary CIDR range. + + Attributes: + subnetwork (str): + Subnetwork Name. + Example: projects/my-project/regions/us- + central1/subnetworks/my-subnet + network (str): + Network Name. + Example: projects/my-project/global/networks/my- + network + ip_cidr_range (str): + The range of internal addresses that are + owned by this subnetwork. + secondary_ip_ranges (Sequence[~.cluster_service.UsableSubnetworkSecondaryRange]): + Secondary IP ranges. + status_message (str): + A human readable status message representing the reasons for + cases where the caller cannot use the secondary ranges under + the subnet. 
For example if the secondary_ip_ranges is empty + due to a permission issue, an insufficient permission + message will be given by status_message. + """ + + subnetwork = proto.Field(proto.STRING, number=1) + + network = proto.Field(proto.STRING, number=2) + + ip_cidr_range = proto.Field(proto.STRING, number=3) + + secondary_ip_ranges = proto.RepeatedField( + proto.MESSAGE, number=4, message=UsableSubnetworkSecondaryRange, + ) + + status_message = proto.Field(proto.STRING, number=5) + + +class VerticalPodAutoscaling(proto.Message): + r"""VerticalPodAutoscaling contains global, per-cluster + information required by Vertical Pod Autoscaler to automatically + adjust the resources of pods controlled by it. + + Attributes: + enabled (bool): + Enables vertical pod autoscaling. + """ + + enabled = proto.Field(proto.BOOL, number=1) + + +class IntraNodeVisibilityConfig(proto.Message): + r"""IntraNodeVisibilityConfig contains the desired config of the + intra-node visibility on this cluster. + + Attributes: + enabled (bool): + Enables intra node visibility for this + cluster. + """ + + enabled = proto.Field(proto.BOOL, number=1) + + +class MaxPodsConstraint(proto.Message): + r"""Constraints applied to pods. + + Attributes: + max_pods_per_node (int): + Constraint enforced on the max num of pods + per node. + """ + + max_pods_per_node = proto.Field(proto.INT64, number=1) + + +class DatabaseEncryption(proto.Message): + r"""Configuration of etcd encryption. + + Attributes: + state (~.cluster_service.DatabaseEncryption.State): + Denotes the state of etcd encryption. + key_name (str): + Name of CloudKMS key to use for the + encryption of secrets in etcd. Ex. 
projects/my- + project/locations/global/keyRings/my- + ring/cryptoKeys/my-key + """ + + class State(proto.Enum): + r"""State of etcd encryption.""" + UNKNOWN = 0 + ENCRYPTED = 1 + DECRYPTED = 2 + + state = proto.Field(proto.ENUM, number=2, enum=State,) + + key_name = proto.Field(proto.STRING, number=1) + + +class ResourceUsageExportConfig(proto.Message): + r"""Configuration for exporting cluster resource usages. + + Attributes: + bigquery_destination (~.cluster_service.ResourceUsageExportConfig.BigQueryDestination): + Configuration to use BigQuery as usage export + destination. + enable_network_egress_metering (bool): + Whether to enable network egress metering for + this cluster. If enabled, a daemonset will be + created in the cluster to meter network egress + traffic. + consumption_metering_config (~.cluster_service.ResourceUsageExportConfig.ConsumptionMeteringConfig): + Configuration to enable resource consumption + metering. + """ + + class BigQueryDestination(proto.Message): + r"""Parameters for using BigQuery as the destination of resource + usage export. + + Attributes: + dataset_id (str): + The ID of a BigQuery Dataset. + """ + + dataset_id = proto.Field(proto.STRING, number=1) + + class ConsumptionMeteringConfig(proto.Message): + r"""Parameters for controlling consumption metering. + + Attributes: + enabled (bool): + Whether to enable consumption metering for + this cluster. If enabled, a second BigQuery + table will be created to hold resource + consumption records. 
+ """ + + enabled = proto.Field(proto.BOOL, number=1) + + bigquery_destination = proto.Field( + proto.MESSAGE, number=1, message=BigQueryDestination, + ) + + enable_network_egress_metering = proto.Field(proto.BOOL, number=2) + + consumption_metering_config = proto.Field( + proto.MESSAGE, number=3, message=ConsumptionMeteringConfig, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/noxfile.py b/noxfile.py index fd406dfa..9608c849 100644 --- a/noxfile.py +++ b/noxfile.py @@ -27,8 +27,8 @@ BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.8" -SYSTEM_TEST_PYTHON_VERSIONS = ["2.7", "3.8"] -UNIT_TEST_PYTHON_VERSIONS = ["2.7", "3.5", "3.6", "3.7", "3.8"] +SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"] @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -70,6 +70,8 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. + session.install("asyncmock", "pytest-asyncio") + session.install("mock", "pytest", "pytest-cov") session.install("-e", ".") @@ -135,7 +137,7 @@ def cover(session): test runs (not system test runs), and then erases coverage data. 
""" session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=70") + session.run("coverage", "report", "--show-missing", "--fail-under=99") session.run("coverage", "erase") @@ -150,7 +152,7 @@ def docs(session): shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( "sphinx-build", - "-W", # warnings as errors + # "-W", # warnings as errors "-T", # show full traceback on exception "-N", # no colors "-b", diff --git a/samples/AUTHORING_GUIDE.md b/samples/AUTHORING_GUIDE.md new file mode 100644 index 00000000..55c97b32 --- /dev/null +++ b/samples/AUTHORING_GUIDE.md @@ -0,0 +1 @@ +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/AUTHORING_GUIDE.md \ No newline at end of file diff --git a/samples/CONTRIBUTING.md b/samples/CONTRIBUTING.md new file mode 100644 index 00000000..34c882b6 --- /dev/null +++ b/samples/CONTRIBUTING.md @@ -0,0 +1 @@ +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/CONTRIBUTING.md \ No newline at end of file diff --git a/scripts/fixup_container_v1_keywords.py b/scripts/fixup_container_v1_keywords.py new file mode 100644 index 00000000..8bb63c78 --- /dev/null +++ b/scripts/fixup_container_v1_keywords.py @@ -0,0 +1,207 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class containerCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'cancel_operation': ('project_id', 'zone', 'operation_id', 'name', ), + 'complete_ip_rotation': ('project_id', 'zone', 'cluster_id', 'name', ), + 'create_cluster': ('cluster', 'project_id', 'zone', 'parent', ), + 'create_node_pool': ('node_pool', 'project_id', 'zone', 'cluster_id', 'parent', ), + 'delete_cluster': ('project_id', 'zone', 'cluster_id', 'name', ), + 'delete_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'get_cluster': ('project_id', 'zone', 'cluster_id', 'name', ), + 'get_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'get_operation': ('project_id', 'zone', 'operation_id', 'name', ), + 'get_server_config': ('project_id', 'zone', 'name', ), + 'list_clusters': ('project_id', 'zone', 'parent', ), + 'list_node_pools': ('project_id', 'zone', 'cluster_id', 'parent', ), + 'list_operations': ('project_id', 'zone', 'parent', ), + 'list_usable_subnetworks': ('parent', 'filter', 'page_size', 'page_token', ), + 'rollback_node_pool_upgrade': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'set_addons_config': ('addons_config', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_labels': ('resource_labels', 'label_fingerprint', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_legacy_abac': ('enabled', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_locations': 
('locations', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_logging_service': ('logging_service', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_maintenance_policy': ('project_id', 'zone', 'cluster_id', 'maintenance_policy', 'name', ), + 'set_master_auth': ('action', 'update', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_monitoring_service': ('monitoring_service', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_network_policy': ('network_policy', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_node_pool_autoscaling': ('autoscaling', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'set_node_pool_management': ('management', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'set_node_pool_size': ('node_count', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'start_ip_rotation': ('project_id', 'zone', 'cluster_id', 'name', 'rotate_credentials', ), + 'update_cluster': ('update', 'project_id', 'zone', 'cluster_id', 'name', ), + 'update_master': ('master_version', 'project_id', 'zone', 'cluster_id', 'name', ), + 'update_node_pool': ('node_version', 'image_type', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=containerCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the container client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. 
+ +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/scripts/fixup_container_v1beta1_keywords.py b/scripts/fixup_container_v1beta1_keywords.py new file mode 100644 index 00000000..61c23309 --- /dev/null +++ b/scripts/fixup_container_v1beta1_keywords.py @@ -0,0 +1,208 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class containerCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'cancel_operation': ('project_id', 'zone', 'operation_id', 'name', ), + 'complete_ip_rotation': ('project_id', 'zone', 'cluster_id', 'name', ), + 'create_cluster': ('project_id', 'zone', 'cluster', 'parent', ), + 'create_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool', 'parent', ), + 'delete_cluster': ('project_id', 'zone', 'cluster_id', 'name', ), + 'delete_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'get_cluster': ('project_id', 'zone', 'cluster_id', 'name', ), + 'get_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'get_operation': ('project_id', 'zone', 'operation_id', 'name', ), + 'get_server_config': ('project_id', 'zone', 'name', ), + 'list_clusters': ('project_id', 'zone', 'parent', ), + 'list_locations': ('parent', ), + 'list_node_pools': ('project_id', 'zone', 'cluster_id', 'parent', ), + 'list_operations': ('project_id', 'zone', 'parent', ), + 
'list_usable_subnetworks': ('parent', 'filter', 'page_size', 'page_token', ), + 'rollback_node_pool_upgrade': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'set_addons_config': ('project_id', 'zone', 'cluster_id', 'addons_config', 'name', ), + 'set_labels': ('project_id', 'zone', 'cluster_id', 'resource_labels', 'label_fingerprint', 'name', ), + 'set_legacy_abac': ('project_id', 'zone', 'cluster_id', 'enabled', 'name', ), + 'set_locations': ('project_id', 'zone', 'cluster_id', 'locations', 'name', ), + 'set_logging_service': ('project_id', 'zone', 'cluster_id', 'logging_service', 'name', ), + 'set_maintenance_policy': ('project_id', 'zone', 'cluster_id', 'maintenance_policy', 'name', ), + 'set_master_auth': ('project_id', 'zone', 'cluster_id', 'action', 'update', 'name', ), + 'set_monitoring_service': ('project_id', 'zone', 'cluster_id', 'monitoring_service', 'name', ), + 'set_network_policy': ('project_id', 'zone', 'cluster_id', 'network_policy', 'name', ), + 'set_node_pool_autoscaling': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'autoscaling', 'name', ), + 'set_node_pool_management': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'management', 'name', ), + 'set_node_pool_size': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'node_count', 'name', ), + 'start_ip_rotation': ('project_id', 'zone', 'cluster_id', 'name', 'rotate_credentials', ), + 'update_cluster': ('project_id', 'zone', 'cluster_id', 'update', 'name', ), + 'update_master': ('project_id', 'zone', 'cluster_id', 'master_version', 'name', ), + 'update_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'node_version', 'image_type', 'workload_metadata_config', 'name', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. 
+ return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=containerCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. 
+ updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the container client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/setup.py b/setup.py index 08ba0a46..f3f19aa8 100644 --- a/setup.py +++ b/setup.py @@ -29,8 
+29,10 @@ # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.14.0, < 2.0.0dev", + "google-api-core[grpc] >= 1.21.0, < 2.0.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", + "proto-plus >= 0.4.0", + "libcst >= 0.2.5", ] extras = {} @@ -46,7 +48,9 @@ # Only include packages under the 'google' namespace. Do not include tests, # benchmarks, etc. packages = [ - package for package in setuptools.find_packages() if package.startswith("google") + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") ] # Determine which namespaces are needed. @@ -69,12 +73,10 @@ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Operating System :: OS Independent", "Topic :: Internet", ], @@ -83,7 +85,11 @@ namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", + python_requires=">=3.6", + scripts=[ + "scripts/fixup_container_v1_keywords.py", + "scripts/fixup_container_v1beta1_keywords.py", + ], include_package_data=True, zip_safe=False, ) diff --git a/synth.metadata b/synth.metadata index 5115fef0..602ab60c 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,22 +4,14 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-container.git", - "sha": "39dd6e5d3652084a558e4be02cbfa586615762cf" - } - }, - { - "git": { - "name": "googleapis", - "remote": "https://github.com/googleapis/googleapis.git", - "sha": "cf41866c6f14f10a07aa1e2a1260fc0a2727d889", - "internalRef": "317812187" + 
"sha": "761f12af56bc24306b874c5fc60a8c958232a252" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "303271797a360f8a439203413f13a160f2f5b3b4" + "sha": "2a5693326b1e708ea8464b4cb06ea2894691c365" } } ], diff --git a/synth.py b/synth.py index 5e1e6567..0b0c79d7 100644 --- a/synth.py +++ b/synth.py @@ -29,13 +29,39 @@ version=version, bazel_target=f"//google/container/{version}:container-{version}-py", include_protos=True, + proto_output_path=f"google/container_{version}/proto", ) - s.move(library / f"google/cloud/container_{version}") - s.move(library / f"tests/unit/gapic/{version}") + s.move(library / "google/container", "google/cloud/container") + s.move( + library / f"google/container_{version}", + f"google/cloud/container_{version}" + ) + s.move(library / "tests") + s.move(library / "scripts") + s.move(library / "docs", excludes=[library / "docs/index.rst"]) -# Use the highest version library to generate import alias. -s.move(library / "google/cloud/container.py") + # Fix namespace + s.replace( + f"google/cloud/**/*.py", + f"google.container_{version}", + f"google.cloud.container_{version}", + ) + s.replace( + f"tests/unit/gapic/**/*.py", + f"google.container_{version}", + f"google.cloud.container_{version}", + ) + s.replace( + f"google/cloud/**/*.py", + f"google.container_{version}", + f"google.cloud.container_{version}", + ) + s.replace( + f"docs/**/*.rst", + f"google.container_{version}", + f"google.cloud.container_{version}", + ) # Issues exist where python files should define the source encoding # https://github.com/googleapis/gapic-generator/issues/2097 @@ -60,10 +86,19 @@ # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- -templated_files = common.py_library(unit_cov_level=0, cov_level=70) -s.move(templated_files) +templated_files = common.py_library( + samples=False, # set to 
True only if there are samples + microgenerator=True, + cov_level=99, +) +s.move(templated_files, excludes=[".coveragerc"]) # microgenerator has a good .coveragerc file + # TODO(busunkim): Use latest sphinx after microgenerator transition s.replace("noxfile.py", """['"]sphinx['"]""", '"sphinx<3.0.0"') +# Temporarily disable warnings due to +# https://github.com/googleapis/gapic-generator-python/issues/525 +s.replace("noxfile.py", '[\"\']-W[\"\']', '# "-W"') + s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/tests/system/gapic/v1/test_system_cluster_manager_v1.py b/tests/system/gapic/v1/test_system_cluster_manager_v1.py index edd29d99..23bcb9ab 100644 --- a/tests/system/gapic/v1/test_system_cluster_manager_v1.py +++ b/tests/system/gapic/v1/test_system_cluster_manager_v1.py @@ -16,7 +16,6 @@ import time from google.cloud import container_v1 -from google.cloud.container_v1.proto import cluster_service_pb2 class TestSystemClusterManager(object): @@ -24,6 +23,6 @@ def test_list_clusters(self): project_id = os.environ["PROJECT_ID"] client = container_v1.ClusterManagerClient() - project_id_2 = project_id - zone = "us-central1-a" - response = client.list_clusters(project_id_2, zone) + response = client.list_clusters( + request={"project_id": os.environ["PROJECT_ID"], "zone": "us-central1-a"} + ) diff --git a/google/cloud/container_v1/gapic/__init__.py b/tests/unit/gapic/container_v1/__init__.py similarity index 100% rename from google/cloud/container_v1/gapic/__init__.py rename to tests/unit/gapic/container_v1/__init__.py diff --git a/tests/unit/gapic/container_v1/test_cluster_manager.py b/tests/unit/gapic/container_v1/test_cluster_manager.py new file mode 100644 index 00000000..edcf004c --- /dev/null +++ b/tests/unit/gapic/container_v1/test_cluster_manager.py @@ -0,0 +1,8450 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.container_v1.services.cluster_manager import ClusterManagerAsyncClient +from google.cloud.container_v1.services.cluster_manager import ClusterManagerClient +from google.cloud.container_v1.services.cluster_manager import pagers +from google.cloud.container_v1.services.cluster_manager import transports +from google.cloud.container_v1.types import cluster_service +from google.oauth2 import service_account +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ClusterManagerClient._get_default_mtls_endpoint(None) is None + assert ( + ClusterManagerClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + ClusterManagerClient._get_default_mtls_endpoint(api_mtls_endpoint) + == 
api_mtls_endpoint + ) + assert ( + ClusterManagerClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ClusterManagerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ClusterManagerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [ClusterManagerClient, ClusterManagerAsyncClient] +) +def test_cluster_manager_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "container.googleapis.com:443" + + +def test_cluster_manager_client_get_transport_class(): + transport = ClusterManagerClient.get_transport_class() + assert transport == transports.ClusterManagerGrpcTransport + + transport = ClusterManagerClient.get_transport_class("grpc") + assert transport == transports.ClusterManagerGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), + ( + ClusterManagerAsyncClient, + transports.ClusterManagerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_cluster_manager_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(ClusterManagerClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ClusterManagerClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "never". + os.environ["GOOGLE_API_USE_MTLS"] = "never" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "always". + os.environ["GOOGLE_API_USE_MTLS"] = "always" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and client_cert_source is provided. 
+ os.environ["GOOGLE_API_USE_MTLS"] = "auto" + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + os.environ["GOOGLE_API_USE_MTLS"] = "auto" + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. + os.environ["GOOGLE_API_USE_MTLS"] = "auto" + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. 
+ os.environ["GOOGLE_API_USE_MTLS"] = "Unsupported" + with pytest.raises(MutualTLSChannelError): + client = client_class() + + del os.environ["GOOGLE_API_USE_MTLS"] + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), + ( + ClusterManagerAsyncClient, + transports.ClusterManagerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_cluster_manager_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), + ( + ClusterManagerAsyncClient, + transports.ClusterManagerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_cluster_manager_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + ) + + +def test_cluster_manager_client_client_options_from_dict(): + with mock.patch( + "google.cloud.container_v1.services.cluster_manager.transports.ClusterManagerGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = ClusterManagerClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + ) + + +def test_list_clusters(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListClustersRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListClustersResponse( + missing_zones=["missing_zones_value"], + ) + + response = client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.ListClustersResponse) + + assert response.missing_zones == ["missing_zones_value"] + + +@pytest.mark.asyncio +async def test_list_clusters_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListClustersRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListClustersResponse(missing_zones=["missing_zones_value"],) + ) + + response = await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ListClustersResponse) + + assert response.missing_zones == ["missing_zones_value"] + + +def test_list_clusters_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListClustersRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + call.return_value = cluster_service.ListClustersResponse() + + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_clusters_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListClustersRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListClustersResponse() + ) + + await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_clusters_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListClustersResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_clusters( + project_id="project_id_value", zone="zone_value", parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].parent == "parent_value" + + +def test_list_clusters_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + cluster_service.ListClustersRequest(), + project_id="project_id_value", + zone="zone_value", + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListClustersResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListClustersResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_clusters( + project_id="project_id_value", zone="zone_value", parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_clusters( + cluster_service.ListClustersRequest(), + project_id="project_id_value", + zone="zone_value", + parent="parent_value", + ) + + +def test_get_cluster(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Cluster( + name="name_value", + description="description_value", + initial_node_count=1911, + logging_service="logging_service_value", + monitoring_service="monitoring_service_value", + network="network_value", + cluster_ipv4_cidr="cluster_ipv4_cidr_value", + subnetwork="subnetwork_value", + locations=["locations_value"], + enable_kubernetes_alpha=True, + label_fingerprint="label_fingerprint_value", + self_link="self_link_value", + zone="zone_value", + endpoint="endpoint_value", + initial_cluster_version="initial_cluster_version_value", + current_master_version="current_master_version_value", + current_node_version="current_node_version_value", + create_time="create_time_value", + status=cluster_service.Cluster.Status.PROVISIONING, + status_message="status_message_value", + node_ipv4_cidr_size=1955, + services_ipv4_cidr="services_ipv4_cidr_value", + instance_group_urls=["instance_group_urls_value"], + current_node_count=1936, + expire_time="expire_time_value", + location="location_value", + enable_tpu=True, + tpu_ipv4_cidr_block="tpu_ipv4_cidr_block_value", + ) + + response = client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Cluster) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.initial_node_count == 1911 + + assert response.logging_service == "logging_service_value" + + assert response.monitoring_service == "monitoring_service_value" + + assert response.network == "network_value" + + assert response.cluster_ipv4_cidr == "cluster_ipv4_cidr_value" + + assert response.subnetwork == "subnetwork_value" + + assert response.locations == ["locations_value"] + + assert response.enable_kubernetes_alpha is True + + assert response.label_fingerprint == "label_fingerprint_value" + + assert response.self_link == "self_link_value" + + assert response.zone == "zone_value" + + assert response.endpoint == "endpoint_value" + + assert response.initial_cluster_version == "initial_cluster_version_value" + + assert response.current_master_version == "current_master_version_value" + + assert response.current_node_version == "current_node_version_value" + + assert response.create_time == "create_time_value" + + assert response.status == cluster_service.Cluster.Status.PROVISIONING + + assert response.status_message == "status_message_value" + + assert response.node_ipv4_cidr_size == 1955 + + assert response.services_ipv4_cidr == "services_ipv4_cidr_value" + + assert response.instance_group_urls == ["instance_group_urls_value"] + + assert response.current_node_count == 1936 + + assert response.expire_time == "expire_time_value" + + assert response.location == "location_value" + + assert response.enable_tpu is True + + assert response.tpu_ipv4_cidr_block == "tpu_ipv4_cidr_block_value" + + +@pytest.mark.asyncio +async def test_get_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + 
credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Cluster( + name="name_value", + description="description_value", + initial_node_count=1911, + logging_service="logging_service_value", + monitoring_service="monitoring_service_value", + network="network_value", + cluster_ipv4_cidr="cluster_ipv4_cidr_value", + subnetwork="subnetwork_value", + locations=["locations_value"], + enable_kubernetes_alpha=True, + label_fingerprint="label_fingerprint_value", + self_link="self_link_value", + zone="zone_value", + endpoint="endpoint_value", + initial_cluster_version="initial_cluster_version_value", + current_master_version="current_master_version_value", + current_node_version="current_node_version_value", + create_time="create_time_value", + status=cluster_service.Cluster.Status.PROVISIONING, + status_message="status_message_value", + node_ipv4_cidr_size=1955, + services_ipv4_cidr="services_ipv4_cidr_value", + instance_group_urls=["instance_group_urls_value"], + current_node_count=1936, + expire_time="expire_time_value", + location="location_value", + enable_tpu=True, + tpu_ipv4_cidr_block="tpu_ipv4_cidr_block_value", + ) + ) + + response = await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Cluster) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.initial_node_count == 1911 + + assert response.logging_service == "logging_service_value" + + assert response.monitoring_service == "monitoring_service_value" + + assert response.network == "network_value" + + assert response.cluster_ipv4_cidr == "cluster_ipv4_cidr_value" + + assert response.subnetwork == "subnetwork_value" + + assert response.locations == ["locations_value"] + + assert response.enable_kubernetes_alpha is True + + assert response.label_fingerprint == "label_fingerprint_value" + + assert response.self_link == "self_link_value" + + assert response.zone == "zone_value" + + assert response.endpoint == "endpoint_value" + + assert response.initial_cluster_version == "initial_cluster_version_value" + + assert response.current_master_version == "current_master_version_value" + + assert response.current_node_version == "current_node_version_value" + + assert response.create_time == "create_time_value" + + assert response.status == cluster_service.Cluster.Status.PROVISIONING + + assert response.status_message == "status_message_value" + + assert response.node_ipv4_cidr_size == 1955 + + assert response.services_ipv4_cidr == "services_ipv4_cidr_value" + + assert response.instance_group_urls == ["instance_group_urls_value"] + + assert response.current_node_count == 1936 + + assert response.expire_time == "expire_time_value" + + assert response.location == "location_value" + + assert response.enable_tpu is True + + assert response.tpu_ipv4_cidr_block == "tpu_ipv4_cidr_block_value" + + +def test_get_cluster_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = cluster_service.GetClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_cluster), "__call__") as call: + call.return_value = cluster_service.Cluster() + + client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_cluster_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Cluster() + ) + + await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_cluster_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Cluster() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_cluster( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" + + +def test_get_cluster_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_cluster( + cluster_service.GetClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Cluster() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Cluster() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_cluster( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_cluster( + cluster_service.GetClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + +def test_create_cluster(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CreateClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_create_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CreateClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_create_cluster_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CreateClusterRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_cluster), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_cluster_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = cluster_service.CreateClusterRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_cluster_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_cluster( + project_id="project_id_value", + zone="zone_value", + cluster=cluster_service.Cluster(name="name_value"), + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster == cluster_service.Cluster(name="name_value") + assert args[0].parent == "parent_value" + + +def test_create_cluster_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_cluster( + cluster_service.CreateClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster=cluster_service.Cluster(name="name_value"), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_cluster( + project_id="project_id_value", + zone="zone_value", + cluster=cluster_service.Cluster(name="name_value"), + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster == cluster_service.Cluster(name="name_value") + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_cluster( + cluster_service.CreateClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster=cluster_service.Cluster(name="name_value"), + parent="parent_value", + ) + + +def test_update_cluster(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.UpdateClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_update_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.UpdateClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_update_cluster_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_cluster), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_cluster_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = cluster_service.UpdateClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_update_cluster_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_cluster( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + update=cluster_service.ClusterUpdate( + desired_node_version="desired_node_version_value" + ), + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].update == cluster_service.ClusterUpdate( + desired_node_version="desired_node_version_value" + ) + assert args[0].name == "name_value" + + +def test_update_cluster_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_cluster( + cluster_service.UpdateClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + update=cluster_service.ClusterUpdate( + desired_node_version="desired_node_version_value" + ), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_update_cluster_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_cluster( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + update=cluster_service.ClusterUpdate( + desired_node_version="desired_node_version_value" + ), + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].update == cluster_service.ClusterUpdate( + desired_node_version="desired_node_version_value" + ) + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_update_cluster_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_cluster( + cluster_service.UpdateClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + update=cluster_service.ClusterUpdate( + desired_node_version="desired_node_version_value" + ), + name="name_value", + ) + + +def test_update_node_pool(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.UpdateNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.update_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_update_node_pool_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.UpdateNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.update_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.update_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_update_node_pool_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateNodePoolRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.update_node_pool), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.update_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_node_pool_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateNodePoolRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_node_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.update_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_node_pool_autoscaling(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetNodePoolAutoscalingRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_node_pool_autoscaling), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_node_pool_autoscaling(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_node_pool_autoscaling_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = cluster_service.SetNodePoolAutoscalingRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_node_pool_autoscaling), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_node_pool_autoscaling(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_node_pool_autoscaling_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = cluster_service.SetNodePoolAutoscalingRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_node_pool_autoscaling), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_node_pool_autoscaling(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_node_pool_autoscaling_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolAutoscalingRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_node_pool_autoscaling), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_node_pool_autoscaling(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_logging_service(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLoggingServiceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_logging_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_logging_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_logging_service_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLoggingServiceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_logging_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_logging_service(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_logging_service_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLoggingServiceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_logging_service), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_logging_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_logging_service_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = cluster_service.SetLoggingServiceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_logging_service), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_logging_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_logging_service_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_logging_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_logging_service( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + logging_service="logging_service_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].logging_service == "logging_service_value" + assert args[0].name == "name_value" + + +def test_set_logging_service_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_logging_service( + cluster_service.SetLoggingServiceRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + logging_service="logging_service_value", + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_set_logging_service_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_logging_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_logging_service( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + logging_service="logging_service_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].logging_service == "logging_service_value" + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_set_logging_service_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_logging_service( + cluster_service.SetLoggingServiceRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + logging_service="logging_service_value", + name="name_value", + ) + + +def test_set_monitoring_service(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetMonitoringServiceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_monitoring_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_monitoring_service_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetMonitoringServiceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_monitoring_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_monitoring_service_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMonitoringServiceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_monitoring_service), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_monitoring_service_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMonitoringServiceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_monitoring_service), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_monitoring_service_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_monitoring_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_monitoring_service( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + monitoring_service="monitoring_service_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].monitoring_service == "monitoring_service_value" + assert args[0].name == "name_value" + + +def test_set_monitoring_service_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_monitoring_service( + cluster_service.SetMonitoringServiceRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + monitoring_service="monitoring_service_value", + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_set_monitoring_service_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_monitoring_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.set_monitoring_service( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + monitoring_service="monitoring_service_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].monitoring_service == "monitoring_service_value" + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_set_monitoring_service_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_monitoring_service( + cluster_service.SetMonitoringServiceRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + monitoring_service="monitoring_service_value", + name="name_value", + ) + + +def test_set_addons_config(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetAddonsConfigRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_addons_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_addons_config_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetAddonsConfigRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.set_addons_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_addons_config_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetAddonsConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_addons_config), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_addons_config_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetAddonsConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_addons_config), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_addons_config_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_addons_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_addons_config( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + addons_config=cluster_service.AddonsConfig( + http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) + ), + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].addons_config == cluster_service.AddonsConfig( + http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) + ) + assert args[0].name == "name_value" + + +def test_set_addons_config_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_addons_config( + cluster_service.SetAddonsConfigRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + addons_config=cluster_service.AddonsConfig( + http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) + ), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_set_addons_config_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_addons_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_addons_config( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + addons_config=cluster_service.AddonsConfig( + http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) + ), + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].addons_config == cluster_service.AddonsConfig( + http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) + ) + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_set_addons_config_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_addons_config( + cluster_service.SetAddonsConfigRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + addons_config=cluster_service.AddonsConfig( + http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) + ), + name="name_value", + ) + + +def test_set_locations(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = cluster_service.SetLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.set_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_locations_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = cluster_service.SetLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_locations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_locations_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = cluster_service.SetLocationsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.set_locations), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.set_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_locations_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLocationsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_locations), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_locations_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.set_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_locations( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + locations=["locations_value"], + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].locations == ["locations_value"] + assert args[0].name == "name_value" + + +def test_set_locations_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_locations( + cluster_service.SetLocationsRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + locations=["locations_value"], + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_set_locations_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_locations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.set_locations( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + locations=["locations_value"], + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].locations == ["locations_value"] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_set_locations_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_locations( + cluster_service.SetLocationsRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + locations=["locations_value"], + name="name_value", + ) + + +def test_update_master(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.UpdateMasterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_master), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.update_master(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_update_master_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.UpdateMasterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_master), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.update_master(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_update_master_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateMasterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.update_master), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.update_master(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_master_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateMasterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_master), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.update_master(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_update_master_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_master), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_master( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + master_version="master_version_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].master_version == "master_version_value" + assert args[0].name == "name_value" + + +def test_update_master_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_master( + cluster_service.UpdateMasterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + master_version="master_version_value", + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_update_master_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_master), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_master( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + master_version="master_version_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].master_version == "master_version_value" + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_update_master_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_master( + cluster_service.UpdateMasterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + master_version="master_version_value", + name="name_value", + ) + + +def test_set_master_auth(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetMasterAuthRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.set_master_auth), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_master_auth(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_master_auth_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetMasterAuthRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_master_auth), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_master_auth(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_master_auth_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMasterAuthRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.set_master_auth), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.set_master_auth(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_master_auth_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMasterAuthRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_master_auth), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_master_auth(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_cluster(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.DeleteClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_delete_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.DeleteClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.delete_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_delete_cluster_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.DeleteClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_cluster_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.DeleteClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_cluster_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.delete_cluster( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" + + +def test_delete_cluster_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + cluster_service.DeleteClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_cluster( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_cluster( + cluster_service.DeleteClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + +def test_list_operations(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListOperationsResponse( + missing_zones=["missing_zones_value"], + ) + + response = client.list_operations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.ListOperationsResponse) + + assert response.missing_zones == ["missing_zones_value"] + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_operations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListOperationsResponse( + missing_zones=["missing_zones_value"], + ) + ) + + response = await client.list_operations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ListOperationsResponse) + + assert response.missing_zones == ["missing_zones_value"] + + +def test_list_operations_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListOperationsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_operations), "__call__") as call: + call.return_value = cluster_service.ListOperationsResponse() + + client.list_operations(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListOperationsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_operations), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListOperationsResponse() + ) + + await client.list_operations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_operations_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListOperationsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_operations( + project_id="project_id_value", zone="zone_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + + +def test_list_operations_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_operations( + cluster_service.ListOperationsRequest(), + project_id="project_id_value", + zone="zone_value", + ) + + +@pytest.mark.asyncio +async def test_list_operations_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_operations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListOperationsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListOperationsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_operations( + project_id="project_id_value", zone="zone_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + + +@pytest.mark.asyncio +async def test_list_operations_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_operations( + cluster_service.ListOperationsRequest(), + project_id="project_id_value", + zone="zone_value", + ) + + +def test_get_operation(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.get_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.get_operation(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_get_operation_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetOperationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_operation), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.get_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = cluster_service.GetOperationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_operation), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.get_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_operation_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_operation( + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" + + +def test_get_operation_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_operation( + cluster_service.GetOperationRequest(), + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + ) + + +@pytest.mark.asyncio +async def test_get_operation_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_operation( + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" + + +@pytest.mark.asyncio +async def test_get_operation_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_operation( + cluster_service.GetOperationRequest(), + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + ) + + +def test_cancel_operation(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.cancel_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.cancel_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.cancel_operation(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CancelOperationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.cancel_operation), "__call__" + ) as call: + call.return_value = None + + client.cancel_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CancelOperationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.cancel_operation), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.cancel_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_cancel_operation_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.cancel_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_operation( + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" + assert args[0].name == "name_value" + + +def test_cancel_operation_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_operation( + cluster_service.CancelOperationRequest(), + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_cancel_operation_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.cancel_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_operation( + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_cancel_operation_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_operation( + cluster_service.CancelOperationRequest(), + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + name="name_value", + ) + + +def test_get_server_config(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetServerConfigRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_server_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.ServerConfig( + default_cluster_version="default_cluster_version_value", + valid_node_versions=["valid_node_versions_value"], + default_image_type="default_image_type_value", + valid_image_types=["valid_image_types_value"], + valid_master_versions=["valid_master_versions_value"], + ) + + response = client.get_server_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ServerConfig) + + assert response.default_cluster_version == "default_cluster_version_value" + + assert response.valid_node_versions == ["valid_node_versions_value"] + + assert response.default_image_type == "default_image_type_value" + + assert response.valid_image_types == ["valid_image_types_value"] + + assert response.valid_master_versions == ["valid_master_versions_value"] + + +@pytest.mark.asyncio +async def test_get_server_config_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetServerConfigRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_server_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ServerConfig( + default_cluster_version="default_cluster_version_value", + valid_node_versions=["valid_node_versions_value"], + default_image_type="default_image_type_value", + valid_image_types=["valid_image_types_value"], + valid_master_versions=["valid_master_versions_value"], + ) + ) + + response = await client.get_server_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ServerConfig) + + assert response.default_cluster_version == "default_cluster_version_value" + + assert response.valid_node_versions == ["valid_node_versions_value"] + + assert response.default_image_type == "default_image_type_value" + + assert response.valid_image_types == ["valid_image_types_value"] + + assert response.valid_master_versions == ["valid_master_versions_value"] + + +def test_get_server_config_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetServerConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_server_config), "__call__" + ) as call: + call.return_value = cluster_service.ServerConfig() + + client.get_server_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_server_config_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetServerConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_server_config), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ServerConfig() + ) + + await client.get_server_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_server_config_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_server_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ServerConfig() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_server_config( + project_id="project_id_value", zone="zone_value", name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].name == "name_value" + + +def test_get_server_config_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_server_config( + cluster_service.GetServerConfigRequest(), + project_id="project_id_value", + zone="zone_value", + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_server_config_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_server_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ServerConfig() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ServerConfig() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_server_config( + project_id="project_id_value", zone="zone_value", name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_server_config_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_server_config( + cluster_service.GetServerConfigRequest(), + project_id="project_id_value", + zone="zone_value", + name="name_value", + ) + + +def test_list_node_pools(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListNodePoolsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListNodePoolsResponse() + + response = client.list_node_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ListNodePoolsResponse) + + +@pytest.mark.asyncio +async def test_list_node_pools_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListNodePoolsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_node_pools), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListNodePoolsResponse() + ) + + response = await client.list_node_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ListNodePoolsResponse) + + +def test_list_node_pools_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListNodePoolsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call: + call.return_value = cluster_service.ListNodePoolsResponse() + + client.list_node_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_node_pools_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListNodePoolsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.list_node_pools), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListNodePoolsResponse() + ) + + await client.list_node_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_node_pools_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListNodePoolsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_node_pools( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].parent == "parent_value" + + +def test_list_node_pools_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_node_pools( + cluster_service.ListNodePoolsRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_node_pools_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_node_pools), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListNodePoolsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListNodePoolsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_node_pools( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_node_pools_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_node_pools( + cluster_service.ListNodePoolsRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + parent="parent_value", + ) + + +def test_get_node_pool(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.NodePool( + name="name_value", + initial_node_count=1911, + self_link="self_link_value", + version="version_value", + instance_group_urls=["instance_group_urls_value"], + status=cluster_service.NodePool.Status.PROVISIONING, + status_message="status_message_value", + pod_ipv4_cidr_size=1856, + ) + + response = client.get_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.NodePool) + + assert response.name == "name_value" + + assert response.initial_node_count == 1911 + + assert response.self_link == "self_link_value" + + assert response.version == "version_value" + + assert response.instance_group_urls == ["instance_group_urls_value"] + + assert response.status == cluster_service.NodePool.Status.PROVISIONING + + assert response.status_message == "status_message_value" + + assert response.pod_ipv4_cidr_size == 1856 + + +@pytest.mark.asyncio +async def test_get_node_pool_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.NodePool( + name="name_value", + initial_node_count=1911, + self_link="self_link_value", + version="version_value", + instance_group_urls=["instance_group_urls_value"], + status=cluster_service.NodePool.Status.PROVISIONING, + status_message="status_message_value", + pod_ipv4_cidr_size=1856, + ) + ) + + response = await client.get_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.NodePool) + + assert response.name == "name_value" + + assert response.initial_node_count == 1911 + + assert response.self_link == "self_link_value" + + assert response.version == "version_value" + + assert response.instance_group_urls == ["instance_group_urls_value"] + + assert response.status == cluster_service.NodePool.Status.PROVISIONING + + assert response.status_message == "status_message_value" + + assert response.pod_ipv4_cidr_size == 1856 + + +def test_get_node_pool_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetNodePoolRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call: + call.return_value = cluster_service.NodePool() + + client.get_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_node_pool_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetNodePoolRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.get_node_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.NodePool() + ) + + await client.get_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_node_pool_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.NodePool() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_node_pool( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].name == "name_value" + + +def test_get_node_pool_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_node_pool( + cluster_service.GetNodePoolRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_node_pool_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.NodePool() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.NodePool() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_node_pool( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_node_pool_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_node_pool( + cluster_service.GetNodePoolRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + name="name_value", + ) + + +def test_create_node_pool(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CreateNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.create_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_create_node_pool_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CreateNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.create_node_pool(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_create_node_pool_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CreateNodePoolRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_node_pool), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.create_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_node_pool_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = cluster_service.CreateNodePoolRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_node_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.create_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_node_pool_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_node_pool( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool=cluster_service.NodePool(name="name_value"), + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool == cluster_service.NodePool(name="name_value") + assert args[0].parent == "parent_value" + + +def test_create_node_pool_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_node_pool( + cluster_service.CreateNodePoolRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool=cluster_service.NodePool(name="name_value"), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_create_node_pool_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_node_pool( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool=cluster_service.NodePool(name="name_value"), + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool == cluster_service.NodePool(name="name_value") + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_create_node_pool_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_node_pool( + cluster_service.CreateNodePoolRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool=cluster_service.NodePool(name="name_value"), + parent="parent_value", + ) + + +def test_delete_node_pool(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.DeleteNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.delete_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_delete_node_pool_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.DeleteNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.delete_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.delete_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_delete_node_pool_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.DeleteNodePoolRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.delete_node_pool), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.delete_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_node_pool_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.DeleteNodePoolRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_node_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.delete_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_node_pool_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.delete_node_pool( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].name == "name_value" + + +def test_delete_node_pool_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_node_pool( + cluster_service.DeleteNodePoolRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_node_pool_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_node_pool( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_node_pool_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_node_pool( + cluster_service.DeleteNodePoolRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + name="name_value", + ) + + +def test_rollback_node_pool_upgrade(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.RollbackNodePoolUpgradeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.rollback_node_pool_upgrade), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.rollback_node_pool_upgrade(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_rollback_node_pool_upgrade_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.RollbackNodePoolUpgradeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.rollback_node_pool_upgrade), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.rollback_node_pool_upgrade(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_rollback_node_pool_upgrade_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.RollbackNodePoolUpgradeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.rollback_node_pool_upgrade), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.rollback_node_pool_upgrade(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_rollback_node_pool_upgrade_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.RollbackNodePoolUpgradeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.rollback_node_pool_upgrade), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.rollback_node_pool_upgrade(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_rollback_node_pool_upgrade_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.rollback_node_pool_upgrade), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.rollback_node_pool_upgrade( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].name == "name_value" + + +def test_rollback_node_pool_upgrade_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.rollback_node_pool_upgrade( + cluster_service.RollbackNodePoolUpgradeRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_rollback_node_pool_upgrade_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.rollback_node_pool_upgrade), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.rollback_node_pool_upgrade( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_rollback_node_pool_upgrade_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.rollback_node_pool_upgrade( + cluster_service.RollbackNodePoolUpgradeRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + name="name_value", + ) + + +def test_set_node_pool_management(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetNodePoolManagementRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_node_pool_management), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_node_pool_management(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_node_pool_management_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetNodePoolManagementRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.set_node_pool_management), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_node_pool_management(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_node_pool_management_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolManagementRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_node_pool_management), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_node_pool_management(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_node_pool_management_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolManagementRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_node_pool_management), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_node_pool_management(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_labels(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLabelsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.set_labels), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_labels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_labels_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLabelsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.set_labels), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_labels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_labels_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLabelsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.set_labels), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.set_labels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_labels_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLabelsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_labels), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_labels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_legacy_abac(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLegacyAbacRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_legacy_abac(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_legacy_abac_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLegacyAbacRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.set_legacy_abac), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_legacy_abac(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_legacy_abac_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLegacyAbacRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.set_legacy_abac(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_legacy_abac_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLegacyAbacRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_legacy_abac), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_legacy_abac(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_legacy_abac_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+        client.set_legacy_abac(
+            project_id="project_id_value",
+            zone="zone_value",
+            cluster_id="cluster_id_value",
+            enabled=True,
+            name="name_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == "project_id_value"
+        assert args[0].zone == "zone_value"
+        assert args[0].cluster_id == "cluster_id_value"
+        assert args[0].enabled is True
+        assert args[0].name == "name_value"
+
+
+def test_set_legacy_abac_flattened_error():
+    client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.set_legacy_abac(
+            cluster_service.SetLegacyAbacRequest(),
+            project_id="project_id_value",
+            zone="zone_value",
+            cluster_id="cluster_id_value",
+            enabled=True,
+            name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_set_legacy_abac_flattened_async():
+    client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.set_legacy_abac), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            cluster_service.Operation()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.set_legacy_abac(
+            project_id="project_id_value",
+            zone="zone_value",
+            cluster_id="cluster_id_value",
+            enabled=True,
+            name="name_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].enabled == True + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_set_legacy_abac_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_legacy_abac( + cluster_service.SetLegacyAbacRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + enabled=True, + name="name_value", + ) + + +def test_start_ip_rotation(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.StartIPRotationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.start_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.start_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_start_ip_rotation_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.StartIPRotationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.start_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.start_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_start_ip_rotation_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.StartIPRotationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.start_ip_rotation), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.start_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_start_ip_rotation_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.StartIPRotationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.start_ip_rotation), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.start_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_start_ip_rotation_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.start_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.start_ip_rotation( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" + + +def test_start_ip_rotation_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.start_ip_rotation( + cluster_service.StartIPRotationRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_start_ip_rotation_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.start_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.start_ip_rotation( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_start_ip_rotation_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.start_ip_rotation( + cluster_service.StartIPRotationRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + +def test_complete_ip_rotation(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CompleteIPRotationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.complete_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.complete_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CompleteIPRotationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.complete_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.complete_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_complete_ip_rotation_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CompleteIPRotationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.complete_ip_rotation), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.complete_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CompleteIPRotationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.complete_ip_rotation), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.complete_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_complete_ip_rotation_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.complete_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.complete_ip_rotation( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" + + +def test_complete_ip_rotation_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.complete_ip_rotation( + cluster_service.CompleteIPRotationRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.complete_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.complete_ip_rotation( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.complete_ip_rotation( + cluster_service.CompleteIPRotationRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + name="name_value", + ) + + +def test_set_node_pool_size(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetNodePoolSizeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_node_pool_size), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_node_pool_size_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetNodePoolSizeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.set_node_pool_size), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_node_pool_size_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolSizeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_node_pool_size), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_node_pool_size_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolSizeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_node_pool_size), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_network_policy(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetNetworkPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_network_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_network_policy_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = cluster_service.SetNetworkPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_network_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_network_policy_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = cluster_service.SetNetworkPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_network_policy), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_network_policy_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNetworkPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_network_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_network_policy_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_network_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_network_policy( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + network_policy=cluster_service.NetworkPolicy( + provider=cluster_service.NetworkPolicy.Provider.CALICO + ), + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].network_policy == cluster_service.NetworkPolicy( + provider=cluster_service.NetworkPolicy.Provider.CALICO + ) + assert args[0].name == "name_value" + + +def test_set_network_policy_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_network_policy( + cluster_service.SetNetworkPolicyRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + network_policy=cluster_service.NetworkPolicy( + provider=cluster_service.NetworkPolicy.Provider.CALICO + ), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_set_network_policy_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_network_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_network_policy( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + network_policy=cluster_service.NetworkPolicy( + provider=cluster_service.NetworkPolicy.Provider.CALICO + ), + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].network_policy == cluster_service.NetworkPolicy( + provider=cluster_service.NetworkPolicy.Provider.CALICO + ) + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_set_network_policy_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_network_policy( + cluster_service.SetNetworkPolicyRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + network_policy=cluster_service.NetworkPolicy( + provider=cluster_service.NetworkPolicy.Provider.CALICO + ), + name="name_value", + ) + + +def test_set_maintenance_policy(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = cluster_service.SetMaintenancePolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_maintenance_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just 
send an empty request. + request = cluster_service.SetMaintenancePolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_maintenance_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_maintenance_policy_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = cluster_service.SetMaintenancePolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_maintenance_policy), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMaintenancePolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_maintenance_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_maintenance_policy_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_maintenance_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_maintenance_policy( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + maintenance_policy=cluster_service.MaintenancePolicy( + window=cluster_service.MaintenanceWindow( + daily_maintenance_window=cluster_service.DailyMaintenanceWindow( + start_time="start_time_value" + ) + ) + ), + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].maintenance_policy == cluster_service.MaintenancePolicy( + window=cluster_service.MaintenanceWindow( + daily_maintenance_window=cluster_service.DailyMaintenanceWindow( + start_time="start_time_value" + ) + ) + ) + assert args[0].name == "name_value" + + +def test_set_maintenance_policy_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_maintenance_policy( + cluster_service.SetMaintenancePolicyRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + maintenance_policy=cluster_service.MaintenancePolicy( + window=cluster_service.MaintenanceWindow( + daily_maintenance_window=cluster_service.DailyMaintenanceWindow( + start_time="start_time_value" + ) + ) + ), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_maintenance_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_maintenance_policy( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + maintenance_policy=cluster_service.MaintenancePolicy( + window=cluster_service.MaintenanceWindow( + daily_maintenance_window=cluster_service.DailyMaintenanceWindow( + start_time="start_time_value" + ) + ) + ), + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].maintenance_policy == cluster_service.MaintenancePolicy( + window=cluster_service.MaintenanceWindow( + daily_maintenance_window=cluster_service.DailyMaintenanceWindow( + start_time="start_time_value" + ) + ) + ) + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_maintenance_policy( + cluster_service.SetMaintenancePolicyRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + maintenance_policy=cluster_service.MaintenancePolicy( + window=cluster_service.MaintenanceWindow( + daily_maintenance_window=cluster_service.DailyMaintenanceWindow( + start_time="start_time_value" + ) + ) + ), + name="name_value", + ) + + +def test_list_usable_subnetworks(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListUsableSubnetworksRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_usable_subnetworks), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.ListUsableSubnetworksResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_usable_subnetworks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListUsableSubnetworksPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListUsableSubnetworksRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_usable_subnetworks), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListUsableSubnetworksResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_usable_subnetworks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListUsableSubnetworksAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_usable_subnetworks_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = cluster_service.ListUsableSubnetworksRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_usable_subnetworks), "__call__" + ) as call: + call.return_value = cluster_service.ListUsableSubnetworksResponse() + + client.list_usable_subnetworks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListUsableSubnetworksRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_usable_subnetworks), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListUsableSubnetworksResponse() + ) + + await client.list_usable_subnetworks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_usable_subnetworks_pager(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.list_usable_subnetworks), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + next_page_token="abc", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[], next_page_token="def", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[cluster_service.UsableSubnetwork(),], + next_page_token="ghi", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_usable_subnetworks(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, cluster_service.UsableSubnetwork) for i in results) + + +def test_list_usable_subnetworks_pages(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_usable_subnetworks), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + next_page_token="abc", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[], next_page_token="def", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[cluster_service.UsableSubnetwork(),], + next_page_token="ghi", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + ), + RuntimeError, + ) + pages = list(client.list_usable_subnetworks(request={}).pages) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_async_pager(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_usable_subnetworks), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + next_page_token="abc", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[], next_page_token="def", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[cluster_service.UsableSubnetwork(),], + next_page_token="ghi", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_usable_subnetworks(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, cluster_service.UsableSubnetwork) for i in responses) + + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_async_pages(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_usable_subnetworks), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + next_page_token="abc", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[], next_page_token="def", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[cluster_service.UsableSubnetwork(),], + next_page_token="ghi", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + ), + RuntimeError, + ) + pages = [] + async for page in (await client.list_usable_subnetworks(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterManagerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterManagerClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.ClusterManagerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = ClusterManagerClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ClusterManagerGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.ClusterManagerGrpcTransport,) + + +def test_cluster_manager_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.ClusterManagerTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_cluster_manager_base_transport(): + # Instantiate the base transport. + transport = transports.ClusterManagerTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "list_clusters", + "get_cluster", + "create_cluster", + "update_cluster", + "update_node_pool", + "set_node_pool_autoscaling", + "set_logging_service", + "set_monitoring_service", + "set_addons_config", + "set_locations", + "update_master", + "set_master_auth", + "delete_cluster", + "list_operations", + "get_operation", + "cancel_operation", + "get_server_config", + "list_node_pools", + "get_node_pool", + "create_node_pool", + "delete_node_pool", + "rollback_node_pool_upgrade", + "set_node_pool_management", + "set_labels", + "set_legacy_abac", + "start_ip_rotation", + "complete_ip_rotation", + "set_node_pool_size", + "set_network_policy", + "set_maintenance_policy", + "list_usable_subnetworks", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_cluster_manager_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(auth, "load_credentials_from_file") as load_creds: + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.ClusterManagerTransport( + credentials_file="credentials.json", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ) + + +def test_cluster_manager_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + ClusterManagerClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",) + ) + + +def test_cluster_manager_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.ClusterManagerGrpcTransport(host="squid.clam.whelk") + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",) + ) + + +def test_cluster_manager_host_no_port(): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="container.googleapis.com" + ), + ) + assert client._transport._host == "container.googleapis.com:443" + + +def test_cluster_manager_host_with_port(): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="container.googleapis.com:8000" + ), + ) + assert client._transport._host == "container.googleapis.com:8000" + + +def test_cluster_manager_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.ClusterManagerGrpcTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +def test_cluster_manager_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. 
+ callback = mock.MagicMock() + transport = transports.ClusterManagerGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_cluster_manager_grpc_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.ClusterManagerGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_cluster_manager_grpc_asyncio_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. 
+ mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.ClusterManagerGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_cluster_manager_grpc_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.ClusterManagerGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_cluster_manager_grpc_asyncio_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.ClusterManagerGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + ) + assert transport.grpc_channel == mock_grpc_channel diff --git a/google/cloud/container_v1/gapic/transports/__init__.py b/tests/unit/gapic/container_v1beta1/__init__.py similarity index 100% rename from google/cloud/container_v1/gapic/transports/__init__.py rename to tests/unit/gapic/container_v1beta1/__init__.py diff --git a/tests/unit/gapic/container_v1beta1/test_cluster_manager.py b/tests/unit/gapic/container_v1beta1/test_cluster_manager.py new file mode 100644 index 00000000..2c253f0f --- /dev/null +++ b/tests/unit/gapic/container_v1beta1/test_cluster_manager.py @@ -0,0 +1,8787 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.container_v1beta1.services.cluster_manager import ( + ClusterManagerAsyncClient, +) +from google.cloud.container_v1beta1.services.cluster_manager import ClusterManagerClient +from google.cloud.container_v1beta1.services.cluster_manager import pagers +from google.cloud.container_v1beta1.services.cluster_manager import transports +from google.cloud.container_v1beta1.types import cluster_service +from google.oauth2 import service_account +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ClusterManagerClient._get_default_mtls_endpoint(None) is None + assert ( + ClusterManagerClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + ClusterManagerClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + ClusterManagerClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ClusterManagerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ClusterManagerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", 
[ClusterManagerClient, ClusterManagerAsyncClient] +) +def test_cluster_manager_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "container.googleapis.com:443" + + +def test_cluster_manager_client_get_transport_class(): + transport = ClusterManagerClient.get_transport_class() + assert transport == transports.ClusterManagerGrpcTransport + + transport = ClusterManagerClient.get_transport_class("grpc") + assert transport == transports.ClusterManagerGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), + ( + ClusterManagerAsyncClient, + transports.ClusterManagerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_cluster_manager_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ClusterManagerClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ClusterManagerClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "never". + os.environ["GOOGLE_API_USE_MTLS"] = "never" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "always". + os.environ["GOOGLE_API_USE_MTLS"] = "always" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and client_cert_source is provided. 
+ os.environ["GOOGLE_API_USE_MTLS"] = "auto" + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + os.environ["GOOGLE_API_USE_MTLS"] = "auto" + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. + os.environ["GOOGLE_API_USE_MTLS"] = "auto" + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. 
+ os.environ["GOOGLE_API_USE_MTLS"] = "Unsupported" + with pytest.raises(MutualTLSChannelError): + client = client_class() + + del os.environ["GOOGLE_API_USE_MTLS"] + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), + ( + ClusterManagerAsyncClient, + transports.ClusterManagerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_cluster_manager_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), + ( + ClusterManagerAsyncClient, + transports.ClusterManagerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_cluster_manager_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + ) + + +def test_cluster_manager_client_client_options_from_dict(): + with mock.patch( + "google.cloud.container_v1beta1.services.cluster_manager.transports.ClusterManagerGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = ClusterManagerClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + ) + + +def test_list_clusters(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListClustersRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListClustersResponse( + missing_zones=["missing_zones_value"], + ) + + response = client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.ListClustersResponse) + + assert response.missing_zones == ["missing_zones_value"] + + +@pytest.mark.asyncio +async def test_list_clusters_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListClustersRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListClustersResponse(missing_zones=["missing_zones_value"],) + ) + + response = await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ListClustersResponse) + + assert response.missing_zones == ["missing_zones_value"] + + +def test_list_clusters_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListClustersRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + call.return_value = cluster_service.ListClustersResponse() + + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_clusters_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListClustersRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListClustersResponse() + ) + + await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_clusters_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListClustersResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_clusters( + project_id="project_id_value", zone="zone_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + + +def test_list_clusters_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + cluster_service.ListClustersRequest(), + project_id="project_id_value", + zone="zone_value", + ) + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListClustersResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListClustersResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_clusters( + project_id="project_id_value", zone="zone_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_clusters( + cluster_service.ListClustersRequest(), + project_id="project_id_value", + zone="zone_value", + ) + + +def test_get_cluster(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Cluster( + name="name_value", + description="description_value", + initial_node_count=1911, + logging_service="logging_service_value", + monitoring_service="monitoring_service_value", + network="network_value", + cluster_ipv4_cidr="cluster_ipv4_cidr_value", + subnetwork="subnetwork_value", + locations=["locations_value"], + enable_kubernetes_alpha=True, + label_fingerprint="label_fingerprint_value", + private_cluster=True, + master_ipv4_cidr_block="master_ipv4_cidr_block_value", + self_link="self_link_value", + zone="zone_value", + endpoint="endpoint_value", + initial_cluster_version="initial_cluster_version_value", + current_master_version="current_master_version_value", + current_node_version="current_node_version_value", + create_time="create_time_value", + status=cluster_service.Cluster.Status.PROVISIONING, + status_message="status_message_value", + node_ipv4_cidr_size=1955, + services_ipv4_cidr="services_ipv4_cidr_value", + instance_group_urls=["instance_group_urls_value"], + current_node_count=1936, + expire_time="expire_time_value", + location="location_value", + enable_tpu=True, + tpu_ipv4_cidr_block="tpu_ipv4_cidr_block_value", + ) + + response = client.get_cluster(request) + + # Establish that the 
underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Cluster) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.initial_node_count == 1911 + + assert response.logging_service == "logging_service_value" + + assert response.monitoring_service == "monitoring_service_value" + + assert response.network == "network_value" + + assert response.cluster_ipv4_cidr == "cluster_ipv4_cidr_value" + + assert response.subnetwork == "subnetwork_value" + + assert response.locations == ["locations_value"] + + assert response.enable_kubernetes_alpha is True + + assert response.label_fingerprint == "label_fingerprint_value" + + assert response.private_cluster is True + + assert response.master_ipv4_cidr_block == "master_ipv4_cidr_block_value" + + assert response.self_link == "self_link_value" + + assert response.zone == "zone_value" + + assert response.endpoint == "endpoint_value" + + assert response.initial_cluster_version == "initial_cluster_version_value" + + assert response.current_master_version == "current_master_version_value" + + assert response.current_node_version == "current_node_version_value" + + assert response.create_time == "create_time_value" + + assert response.status == cluster_service.Cluster.Status.PROVISIONING + + assert response.status_message == "status_message_value" + + assert response.node_ipv4_cidr_size == 1955 + + assert response.services_ipv4_cidr == "services_ipv4_cidr_value" + + assert response.instance_group_urls == ["instance_group_urls_value"] + + assert response.current_node_count == 1936 + + assert response.expire_time == "expire_time_value" + + assert response.location == "location_value" + + assert response.enable_tpu is True + + assert response.tpu_ipv4_cidr_block == 
"tpu_ipv4_cidr_block_value" + + +@pytest.mark.asyncio +async def test_get_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Cluster( + name="name_value", + description="description_value", + initial_node_count=1911, + logging_service="logging_service_value", + monitoring_service="monitoring_service_value", + network="network_value", + cluster_ipv4_cidr="cluster_ipv4_cidr_value", + subnetwork="subnetwork_value", + locations=["locations_value"], + enable_kubernetes_alpha=True, + label_fingerprint="label_fingerprint_value", + private_cluster=True, + master_ipv4_cidr_block="master_ipv4_cidr_block_value", + self_link="self_link_value", + zone="zone_value", + endpoint="endpoint_value", + initial_cluster_version="initial_cluster_version_value", + current_master_version="current_master_version_value", + current_node_version="current_node_version_value", + create_time="create_time_value", + status=cluster_service.Cluster.Status.PROVISIONING, + status_message="status_message_value", + node_ipv4_cidr_size=1955, + services_ipv4_cidr="services_ipv4_cidr_value", + instance_group_urls=["instance_group_urls_value"], + current_node_count=1936, + expire_time="expire_time_value", + location="location_value", + enable_tpu=True, + tpu_ipv4_cidr_block="tpu_ipv4_cidr_block_value", + ) + ) + + response = await client.get_cluster(request) + + # Establish that the underlying gRPC stub method 
was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Cluster) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.initial_node_count == 1911 + + assert response.logging_service == "logging_service_value" + + assert response.monitoring_service == "monitoring_service_value" + + assert response.network == "network_value" + + assert response.cluster_ipv4_cidr == "cluster_ipv4_cidr_value" + + assert response.subnetwork == "subnetwork_value" + + assert response.locations == ["locations_value"] + + assert response.enable_kubernetes_alpha is True + + assert response.label_fingerprint == "label_fingerprint_value" + + assert response.private_cluster is True + + assert response.master_ipv4_cidr_block == "master_ipv4_cidr_block_value" + + assert response.self_link == "self_link_value" + + assert response.zone == "zone_value" + + assert response.endpoint == "endpoint_value" + + assert response.initial_cluster_version == "initial_cluster_version_value" + + assert response.current_master_version == "current_master_version_value" + + assert response.current_node_version == "current_node_version_value" + + assert response.create_time == "create_time_value" + + assert response.status == cluster_service.Cluster.Status.PROVISIONING + + assert response.status_message == "status_message_value" + + assert response.node_ipv4_cidr_size == 1955 + + assert response.services_ipv4_cidr == "services_ipv4_cidr_value" + + assert response.instance_group_urls == ["instance_group_urls_value"] + + assert response.current_node_count == 1936 + + assert response.expire_time == "expire_time_value" + + assert response.location == "location_value" + + assert response.enable_tpu is True + + assert response.tpu_ipv4_cidr_block == "tpu_ipv4_cidr_block_value" + + +def 
test_get_cluster_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_cluster), "__call__") as call: + call.return_value = cluster_service.Cluster() + + client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_cluster_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Cluster() + ) + + await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_cluster_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Cluster() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_cluster( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + + +def test_get_cluster_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_cluster( + cluster_service.GetClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Cluster() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Cluster() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_cluster( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_cluster( + cluster_service.GetClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + +def test_create_cluster(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CreateClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_create_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CreateClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_create_cluster_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CreateClusterRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.create_cluster), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_cluster_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CreateClusterRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_cluster_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.create_cluster( + project_id="project_id_value", + zone="zone_value", + cluster=cluster_service.Cluster(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster == cluster_service.Cluster(name="name_value") + + +def test_create_cluster_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_cluster( + cluster_service.CreateClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster=cluster_service.Cluster(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_cluster( + project_id="project_id_value", + zone="zone_value", + cluster=cluster_service.Cluster(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster == cluster_service.Cluster(name="name_value") + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_cluster( + cluster_service.CreateClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster=cluster_service.Cluster(name="name_value"), + ) + + +def test_update_cluster(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.UpdateClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_update_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.UpdateClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_update_cluster_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_cluster), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_cluster_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = cluster_service.UpdateClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_update_cluster_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_cluster( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + update=cluster_service.ClusterUpdate( + desired_node_version="desired_node_version_value" + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].update == cluster_service.ClusterUpdate( + desired_node_version="desired_node_version_value" + ) + + +def test_update_cluster_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_cluster( + cluster_service.UpdateClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + update=cluster_service.ClusterUpdate( + desired_node_version="desired_node_version_value" + ), + ) + + +@pytest.mark.asyncio +async def test_update_cluster_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_cluster( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + update=cluster_service.ClusterUpdate( + desired_node_version="desired_node_version_value" + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].update == cluster_service.ClusterUpdate( + desired_node_version="desired_node_version_value" + ) + + +@pytest.mark.asyncio +async def test_update_cluster_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_cluster( + cluster_service.UpdateClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + update=cluster_service.ClusterUpdate( + desired_node_version="desired_node_version_value" + ), + ) + + +def test_update_node_pool(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.UpdateNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.update_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_update_node_pool_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.UpdateNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.update_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.update_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_update_node_pool_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateNodePoolRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.update_node_pool), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.update_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_node_pool_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateNodePoolRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_node_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.update_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_node_pool_autoscaling(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetNodePoolAutoscalingRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_node_pool_autoscaling), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_node_pool_autoscaling(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_node_pool_autoscaling_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = cluster_service.SetNodePoolAutoscalingRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_node_pool_autoscaling), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_node_pool_autoscaling(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_node_pool_autoscaling_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = cluster_service.SetNodePoolAutoscalingRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_node_pool_autoscaling), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_node_pool_autoscaling(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_node_pool_autoscaling_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolAutoscalingRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_node_pool_autoscaling), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_node_pool_autoscaling(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_logging_service(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLoggingServiceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_logging_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_logging_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_logging_service_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLoggingServiceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_logging_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_logging_service(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_logging_service_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLoggingServiceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_logging_service), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_logging_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_logging_service_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = cluster_service.SetLoggingServiceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_logging_service), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_logging_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_logging_service_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_logging_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_logging_service( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + logging_service="logging_service_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].logging_service == "logging_service_value" + + +def test_set_logging_service_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_logging_service( + cluster_service.SetLoggingServiceRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + logging_service="logging_service_value", + ) + + +@pytest.mark.asyncio +async def test_set_logging_service_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_logging_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_logging_service( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + logging_service="logging_service_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].logging_service == "logging_service_value" + + +@pytest.mark.asyncio +async def test_set_logging_service_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_logging_service( + cluster_service.SetLoggingServiceRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + logging_service="logging_service_value", + ) + + +def test_set_monitoring_service(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetMonitoringServiceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_monitoring_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_monitoring_service_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetMonitoringServiceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_monitoring_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_monitoring_service_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMonitoringServiceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_monitoring_service), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_monitoring_service_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMonitoringServiceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_monitoring_service), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_monitoring_service_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_monitoring_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_monitoring_service( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + monitoring_service="monitoring_service_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].monitoring_service == "monitoring_service_value" + + +def test_set_monitoring_service_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_monitoring_service( + cluster_service.SetMonitoringServiceRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + monitoring_service="monitoring_service_value", + ) + + +@pytest.mark.asyncio +async def test_set_monitoring_service_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_monitoring_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.set_monitoring_service( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + monitoring_service="monitoring_service_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].monitoring_service == "monitoring_service_value" + + +@pytest.mark.asyncio +async def test_set_monitoring_service_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_monitoring_service( + cluster_service.SetMonitoringServiceRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + monitoring_service="monitoring_service_value", + ) + + +def test_set_addons_config(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetAddonsConfigRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_addons_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_addons_config_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetAddonsConfigRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.set_addons_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_addons_config_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetAddonsConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_addons_config), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_addons_config_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetAddonsConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_addons_config), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_addons_config_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_addons_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_addons_config( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + addons_config=cluster_service.AddonsConfig( + http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].addons_config == cluster_service.AddonsConfig( + http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) + ) + + +def test_set_addons_config_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_addons_config( + cluster_service.SetAddonsConfigRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + addons_config=cluster_service.AddonsConfig( + http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) + ), + ) + + +@pytest.mark.asyncio +async def test_set_addons_config_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_addons_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_addons_config( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + addons_config=cluster_service.AddonsConfig( + http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].addons_config == cluster_service.AddonsConfig( + http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) + ) + + +@pytest.mark.asyncio +async def test_set_addons_config_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_addons_config( + cluster_service.SetAddonsConfigRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + addons_config=cluster_service.AddonsConfig( + http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) + ), + ) + + +def test_set_locations(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.set_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_locations_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.set_locations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_locations_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLocationsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.set_locations), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.set_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_locations_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLocationsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_locations), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_locations_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.set_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.set_locations( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + locations=["locations_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].locations == ["locations_value"] + + +def test_set_locations_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_locations( + cluster_service.SetLocationsRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + locations=["locations_value"], + ) + + +@pytest.mark.asyncio +async def test_set_locations_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_locations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_locations( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + locations=["locations_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].locations == ["locations_value"] + + +@pytest.mark.asyncio +async def test_set_locations_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_locations( + cluster_service.SetLocationsRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + locations=["locations_value"], + ) + + +def test_update_master(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.UpdateMasterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_master), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.update_master(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_update_master_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.UpdateMasterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_master), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.update_master(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_update_master_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateMasterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.update_master), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.update_master(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_master_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateMasterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_master), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.update_master(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_update_master_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_master), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_master( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + master_version="master_version_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].master_version == "master_version_value" + + +def test_update_master_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_master( + cluster_service.UpdateMasterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + master_version="master_version_value", + ) + + +@pytest.mark.asyncio +async def test_update_master_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_master), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_master( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + master_version="master_version_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].master_version == "master_version_value" + + +@pytest.mark.asyncio +async def test_update_master_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_master( + cluster_service.UpdateMasterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + master_version="master_version_value", + ) + + +def test_set_master_auth(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetMasterAuthRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.set_master_auth), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_master_auth(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_master_auth_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetMasterAuthRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_master_auth), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_master_auth(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_master_auth_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMasterAuthRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.set_master_auth), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.set_master_auth(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_master_auth_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMasterAuthRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_master_auth), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_master_auth(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_cluster(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.DeleteClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_delete_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.DeleteClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.delete_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_delete_cluster_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.DeleteClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_cluster_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.DeleteClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_cluster_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.delete_cluster( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + + +def test_delete_cluster_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + cluster_service.DeleteClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_cluster( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_cluster( + cluster_service.DeleteClusterRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + +def test_list_operations(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListOperationsResponse( + missing_zones=["missing_zones_value"], + ) + + response = client.list_operations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.ListOperationsResponse) + + assert response.missing_zones == ["missing_zones_value"] + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_operations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListOperationsResponse( + missing_zones=["missing_zones_value"], + ) + ) + + response = await client.list_operations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ListOperationsResponse) + + assert response.missing_zones == ["missing_zones_value"] + + +def test_list_operations_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListOperationsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_operations), "__call__") as call: + call.return_value = cluster_service.ListOperationsResponse() + + client.list_operations(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListOperationsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_operations), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListOperationsResponse() + ) + + await client.list_operations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_operations_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListOperationsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_operations( + project_id="project_id_value", zone="zone_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + + +def test_list_operations_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_operations( + cluster_service.ListOperationsRequest(), + project_id="project_id_value", + zone="zone_value", + ) + + +@pytest.mark.asyncio +async def test_list_operations_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_operations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListOperationsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListOperationsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_operations( + project_id="project_id_value", zone="zone_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + + +@pytest.mark.asyncio +async def test_list_operations_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_operations( + cluster_service.ListOperationsRequest(), + project_id="project_id_value", + zone="zone_value", + ) + + +def test_get_operation(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.get_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.get_operation(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_get_operation_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetOperationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_operation), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.get_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = cluster_service.GetOperationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_operation), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.get_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_operation_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_operation( + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" + + +def test_get_operation_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_operation( + cluster_service.GetOperationRequest(), + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + ) + + +@pytest.mark.asyncio +async def test_get_operation_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_operation( + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" + + +@pytest.mark.asyncio +async def test_get_operation_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_operation( + cluster_service.GetOperationRequest(), + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + ) + + +def test_cancel_operation(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.cancel_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.cancel_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.cancel_operation(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CancelOperationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.cancel_operation), "__call__" + ) as call: + call.return_value = None + + client.cancel_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CancelOperationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.cancel_operation), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.cancel_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_cancel_operation_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.cancel_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_operation( + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" + + +def test_cancel_operation_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_operation( + cluster_service.CancelOperationRequest(), + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + ) + + +@pytest.mark.asyncio +async def test_cancel_operation_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.cancel_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_operation( + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" + + +@pytest.mark.asyncio +async def test_cancel_operation_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_operation( + cluster_service.CancelOperationRequest(), + project_id="project_id_value", + zone="zone_value", + operation_id="operation_id_value", + ) + + +def test_get_server_config(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetServerConfigRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_server_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.ServerConfig( + default_cluster_version="default_cluster_version_value", + valid_node_versions=["valid_node_versions_value"], + default_image_type="default_image_type_value", + valid_image_types=["valid_image_types_value"], + valid_master_versions=["valid_master_versions_value"], + ) + + response = client.get_server_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ServerConfig) + + assert response.default_cluster_version == "default_cluster_version_value" + + assert response.valid_node_versions == ["valid_node_versions_value"] + + assert response.default_image_type == "default_image_type_value" + + assert response.valid_image_types == ["valid_image_types_value"] + + assert response.valid_master_versions == ["valid_master_versions_value"] + + +@pytest.mark.asyncio +async def test_get_server_config_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetServerConfigRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_server_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ServerConfig( + default_cluster_version="default_cluster_version_value", + valid_node_versions=["valid_node_versions_value"], + default_image_type="default_image_type_value", + valid_image_types=["valid_image_types_value"], + valid_master_versions=["valid_master_versions_value"], + ) + ) + + response = await client.get_server_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ServerConfig) + + assert response.default_cluster_version == "default_cluster_version_value" + + assert response.valid_node_versions == ["valid_node_versions_value"] + + assert response.default_image_type == "default_image_type_value" + + assert response.valid_image_types == ["valid_image_types_value"] + + assert response.valid_master_versions == ["valid_master_versions_value"] + + +def test_get_server_config_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetServerConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_server_config), "__call__" + ) as call: + call.return_value = cluster_service.ServerConfig() + + client.get_server_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_server_config_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetServerConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_server_config), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ServerConfig() + ) + + await client.get_server_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_server_config_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_server_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ServerConfig() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_server_config( + project_id="project_id_value", zone="zone_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + + +def test_get_server_config_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_server_config( + cluster_service.GetServerConfigRequest(), + project_id="project_id_value", + zone="zone_value", + ) + + +@pytest.mark.asyncio +async def test_get_server_config_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_server_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ServerConfig() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ServerConfig() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_server_config( + project_id="project_id_value", zone="zone_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + + +@pytest.mark.asyncio +async def test_get_server_config_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_server_config( + cluster_service.GetServerConfigRequest(), + project_id="project_id_value", + zone="zone_value", + ) + + +def test_list_node_pools(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListNodePoolsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListNodePoolsResponse() + + response = client.list_node_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ListNodePoolsResponse) + + +@pytest.mark.asyncio +async def test_list_node_pools_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListNodePoolsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_node_pools), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListNodePoolsResponse() + ) + + response = await client.list_node_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ListNodePoolsResponse) + + +def test_list_node_pools_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListNodePoolsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call: + call.return_value = cluster_service.ListNodePoolsResponse() + + client.list_node_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_node_pools_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListNodePoolsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.list_node_pools), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListNodePoolsResponse() + ) + + await client.list_node_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_node_pools_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListNodePoolsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_node_pools( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + + +def test_list_node_pools_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_node_pools( + cluster_service.ListNodePoolsRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + +@pytest.mark.asyncio +async def test_list_node_pools_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_node_pools), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListNodePoolsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListNodePoolsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_node_pools( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + + +@pytest.mark.asyncio +async def test_list_node_pools_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_node_pools( + cluster_service.ListNodePoolsRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + +def test_get_node_pool(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.NodePool( + name="name_value", + initial_node_count=1911, + self_link="self_link_value", + version="version_value", + instance_group_urls=["instance_group_urls_value"], + status=cluster_service.NodePool.Status.PROVISIONING, + status_message="status_message_value", + pod_ipv4_cidr_size=1856, + ) + + response = client.get_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.NodePool) + + assert response.name == "name_value" + + assert response.initial_node_count == 1911 + + assert response.self_link == "self_link_value" + + assert response.version == "version_value" + + assert response.instance_group_urls == ["instance_group_urls_value"] + + assert response.status == cluster_service.NodePool.Status.PROVISIONING + + assert response.status_message == "status_message_value" + + assert response.pod_ipv4_cidr_size == 1856 + + +@pytest.mark.asyncio +async def test_get_node_pool_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.GetNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.NodePool( + name="name_value", + initial_node_count=1911, + self_link="self_link_value", + version="version_value", + instance_group_urls=["instance_group_urls_value"], + status=cluster_service.NodePool.Status.PROVISIONING, + status_message="status_message_value", + pod_ipv4_cidr_size=1856, + ) + ) + + response = await client.get_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.NodePool) + + assert response.name == "name_value" + + assert response.initial_node_count == 1911 + + assert response.self_link == "self_link_value" + + assert response.version == "version_value" + + assert response.instance_group_urls == ["instance_group_urls_value"] + + assert response.status == cluster_service.NodePool.Status.PROVISIONING + + assert response.status_message == "status_message_value" + + assert response.pod_ipv4_cidr_size == 1856 + + +def test_get_node_pool_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetNodePoolRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call: + call.return_value = cluster_service.NodePool() + + client.get_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_node_pool_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetNodePoolRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.get_node_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.NodePool() + ) + + await client.get_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_node_pool_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.NodePool() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_node_pool( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + + +def test_get_node_pool_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_node_pool( + cluster_service.GetNodePoolRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + ) + + +@pytest.mark.asyncio +async def test_get_node_pool_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.NodePool() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.NodePool() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_node_pool( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + + +@pytest.mark.asyncio +async def test_get_node_pool_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_node_pool( + cluster_service.GetNodePoolRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + ) + + +def test_create_node_pool(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CreateNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.create_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_create_node_pool_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CreateNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.create_node_pool(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_create_node_pool_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CreateNodePoolRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_node_pool), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.create_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_node_pool_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = cluster_service.CreateNodePoolRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_node_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.create_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_node_pool_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_node_pool( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool=cluster_service.NodePool(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool == cluster_service.NodePool(name="name_value") + + +def test_create_node_pool_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_node_pool( + cluster_service.CreateNodePoolRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool=cluster_service.NodePool(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_node_pool_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_node_pool( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool=cluster_service.NodePool(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool == cluster_service.NodePool(name="name_value") + + +@pytest.mark.asyncio +async def test_create_node_pool_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_node_pool( + cluster_service.CreateNodePoolRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool=cluster_service.NodePool(name="name_value"), + ) + + +def test_delete_node_pool(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.DeleteNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.delete_node_pool(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_delete_node_pool_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.DeleteNodePoolRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.delete_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_delete_node_pool_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.DeleteNodePoolRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.delete_node_pool), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.delete_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_node_pool_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.DeleteNodePoolRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_node_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.delete_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_node_pool_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.delete_node_pool( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + + +def test_delete_node_pool_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_node_pool( + cluster_service.DeleteNodePoolRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + ) + + +@pytest.mark.asyncio +async def test_delete_node_pool_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_node_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_node_pool( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + + +@pytest.mark.asyncio +async def test_delete_node_pool_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_node_pool( + cluster_service.DeleteNodePoolRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + ) + + +def test_rollback_node_pool_upgrade(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.RollbackNodePoolUpgradeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.rollback_node_pool_upgrade), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.rollback_node_pool_upgrade(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_rollback_node_pool_upgrade_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.RollbackNodePoolUpgradeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.rollback_node_pool_upgrade), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.rollback_node_pool_upgrade(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_rollback_node_pool_upgrade_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.RollbackNodePoolUpgradeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.rollback_node_pool_upgrade), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.rollback_node_pool_upgrade(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_rollback_node_pool_upgrade_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.RollbackNodePoolUpgradeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.rollback_node_pool_upgrade), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.rollback_node_pool_upgrade(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_rollback_node_pool_upgrade_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.rollback_node_pool_upgrade), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.rollback_node_pool_upgrade( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + + +def test_rollback_node_pool_upgrade_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.rollback_node_pool_upgrade( + cluster_service.RollbackNodePoolUpgradeRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + ) + + +@pytest.mark.asyncio +async def test_rollback_node_pool_upgrade_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.rollback_node_pool_upgrade), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.rollback_node_pool_upgrade( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + + +@pytest.mark.asyncio +async def test_rollback_node_pool_upgrade_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.rollback_node_pool_upgrade( + cluster_service.RollbackNodePoolUpgradeRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + ) + + +def test_set_node_pool_management(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetNodePoolManagementRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_node_pool_management), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_node_pool_management(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_node_pool_management_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetNodePoolManagementRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.set_node_pool_management), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_node_pool_management(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_node_pool_management_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolManagementRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_node_pool_management), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_node_pool_management(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_node_pool_management_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolManagementRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_node_pool_management), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_node_pool_management(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_node_pool_management_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_node_pool_management), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_node_pool_management( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + management=cluster_service.NodeManagement(auto_upgrade=True), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].management == cluster_service.NodeManagement(auto_upgrade=True) + + +def test_set_node_pool_management_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_node_pool_management( + cluster_service.SetNodePoolManagementRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + management=cluster_service.NodeManagement(auto_upgrade=True), + ) + + +@pytest.mark.asyncio +async def test_set_node_pool_management_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_node_pool_management), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_node_pool_management( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + management=cluster_service.NodeManagement(auto_upgrade=True), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].management == cluster_service.NodeManagement(auto_upgrade=True) + + +@pytest.mark.asyncio +async def test_set_node_pool_management_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_node_pool_management( + cluster_service.SetNodePoolManagementRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + node_pool_id="node_pool_id_value", + management=cluster_service.NodeManagement(auto_upgrade=True), + ) + + +def test_set_labels(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLabelsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.set_labels), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_labels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_labels_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLabelsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.set_labels), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_labels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_labels_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLabelsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.set_labels), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.set_labels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_labels_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLabelsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_labels), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_labels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_labels_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.set_labels), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.set_labels( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + resource_labels={"key_value": "value_value"}, + label_fingerprint="label_fingerprint_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].resource_labels == {"key_value": "value_value"} + assert args[0].label_fingerprint == "label_fingerprint_value" + + +def test_set_labels_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_labels( + cluster_service.SetLabelsRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + resource_labels={"key_value": "value_value"}, + label_fingerprint="label_fingerprint_value", + ) + + +@pytest.mark.asyncio +async def test_set_labels_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_labels), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.set_labels( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + resource_labels={"key_value": "value_value"}, + label_fingerprint="label_fingerprint_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].resource_labels == {"key_value": "value_value"} + assert args[0].label_fingerprint == "label_fingerprint_value" + + +@pytest.mark.asyncio +async def test_set_labels_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_labels( + cluster_service.SetLabelsRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + resource_labels={"key_value": "value_value"}, + label_fingerprint="label_fingerprint_value", + ) + + +def test_set_legacy_abac(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLegacyAbacRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_legacy_abac(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_legacy_abac_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetLegacyAbacRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.set_legacy_abac), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_legacy_abac(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_legacy_abac_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLegacyAbacRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call: + call.return_value = cluster_service.Operation() + + client.set_legacy_abac(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_legacy_abac_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLegacyAbacRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_legacy_abac), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_legacy_abac(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_legacy_abac_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.set_legacy_abac( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + enabled=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].enabled == True + + +def test_set_legacy_abac_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_legacy_abac( + cluster_service.SetLegacyAbacRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + enabled=True, + ) + + +@pytest.mark.asyncio +async def test_set_legacy_abac_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_legacy_abac), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_legacy_abac( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + enabled=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].enabled == True + + +@pytest.mark.asyncio +async def test_set_legacy_abac_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_legacy_abac( + cluster_service.SetLegacyAbacRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + enabled=True, + ) + + +def test_start_ip_rotation(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.StartIPRotationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.start_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.start_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_start_ip_rotation_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.StartIPRotationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.start_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.start_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_start_ip_rotation_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.StartIPRotationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.start_ip_rotation), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.start_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_start_ip_rotation_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.StartIPRotationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.start_ip_rotation), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.start_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_start_ip_rotation_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.start_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.start_ip_rotation( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + + +def test_start_ip_rotation_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.start_ip_rotation( + cluster_service.StartIPRotationRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + +@pytest.mark.asyncio +async def test_start_ip_rotation_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.start_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.start_ip_rotation( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + + +@pytest.mark.asyncio +async def test_start_ip_rotation_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.start_ip_rotation( + cluster_service.StartIPRotationRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + +def test_complete_ip_rotation(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CompleteIPRotationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.complete_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.complete_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.CompleteIPRotationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.complete_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.complete_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_complete_ip_rotation_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CompleteIPRotationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.complete_ip_rotation), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.complete_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = cluster_service.CompleteIPRotationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.complete_ip_rotation), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.complete_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_complete_ip_rotation_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.complete_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.complete_ip_rotation( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + + +def test_complete_ip_rotation_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.complete_ip_rotation( + cluster_service.CompleteIPRotationRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.complete_ip_rotation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.complete_ip_rotation( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.complete_ip_rotation( + cluster_service.CompleteIPRotationRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + ) + + +def test_set_node_pool_size(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetNodePoolSizeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_node_pool_size), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_node_pool_size_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetNodePoolSizeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_node_pool_size), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_node_pool_size_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolSizeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_node_pool_size), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_node_pool_size_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = cluster_service.SetNodePoolSizeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_node_pool_size), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_network_policy(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetNetworkPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_network_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_network_policy_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetNetworkPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_network_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_network_policy_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNetworkPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_network_policy), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_network_policy_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNetworkPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_network_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_network_policy_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_network_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_network_policy( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + network_policy=cluster_service.NetworkPolicy( + provider=cluster_service.NetworkPolicy.Provider.CALICO + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].network_policy == cluster_service.NetworkPolicy( + provider=cluster_service.NetworkPolicy.Provider.CALICO + ) + + +def test_set_network_policy_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_network_policy( + cluster_service.SetNetworkPolicyRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + network_policy=cluster_service.NetworkPolicy( + provider=cluster_service.NetworkPolicy.Provider.CALICO + ), + ) + + +@pytest.mark.asyncio +async def test_set_network_policy_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_network_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_network_policy( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + network_policy=cluster_service.NetworkPolicy( + provider=cluster_service.NetworkPolicy.Provider.CALICO + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].network_policy == cluster_service.NetworkPolicy( + provider=cluster_service.NetworkPolicy.Provider.CALICO + ) + + +@pytest.mark.asyncio +async def test_set_network_policy_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_network_policy( + cluster_service.SetNetworkPolicyRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + network_policy=cluster_service.NetworkPolicy( + provider=cluster_service.NetworkPolicy.Provider.CALICO + ), + ) + + +def test_set_maintenance_policy(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.SetMaintenancePolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_maintenance_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + + response = client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = cluster_service.SetMaintenancePolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_maintenance_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation( + name="name_value", + zone="zone_value", + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail="detail_value", + status_message="status_message_value", + self_link="self_link_value", + target_link="target_link_value", + location="location_value", + start_time="start_time_value", + end_time="end_time_value", + ) + ) + + response = await client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + + assert response.name == "name_value" + + assert response.zone == "zone_value" + + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + + assert response.status == cluster_service.Operation.Status.PENDING + + assert response.detail == "detail_value" + + assert response.status_message == "status_message_value" + + assert response.self_link == "self_link_value" + + assert response.target_link == "target_link_value" + + assert response.location == "location_value" + + assert response.start_time == "start_time_value" + + assert response.end_time == "end_time_value" + + +def test_set_maintenance_policy_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = cluster_service.SetMaintenancePolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.set_maintenance_policy), "__call__" + ) as call: + call.return_value = cluster_service.Operation() + + client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMaintenancePolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_maintenance_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + + await client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_set_maintenance_policy_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.set_maintenance_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_maintenance_policy( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + maintenance_policy=cluster_service.MaintenancePolicy( + window=cluster_service.MaintenanceWindow( + daily_maintenance_window=cluster_service.DailyMaintenanceWindow( + start_time="start_time_value" + ) + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].maintenance_policy == cluster_service.MaintenancePolicy( + window=cluster_service.MaintenanceWindow( + daily_maintenance_window=cluster_service.DailyMaintenanceWindow( + start_time="start_time_value" + ) + ) + ) + + +def test_set_maintenance_policy_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_maintenance_policy( + cluster_service.SetMaintenancePolicyRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + maintenance_policy=cluster_service.MaintenancePolicy( + window=cluster_service.MaintenanceWindow( + daily_maintenance_window=cluster_service.DailyMaintenanceWindow( + start_time="start_time_value" + ) + ) + ), + ) + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.set_maintenance_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.Operation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_maintenance_policy( + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + maintenance_policy=cluster_service.MaintenancePolicy( + window=cluster_service.MaintenanceWindow( + daily_maintenance_window=cluster_service.DailyMaintenanceWindow( + start_time="start_time_value" + ) + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].maintenance_policy == cluster_service.MaintenancePolicy( + window=cluster_service.MaintenanceWindow( + daily_maintenance_window=cluster_service.DailyMaintenanceWindow( + start_time="start_time_value" + ) + ) + ) + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_maintenance_policy( + cluster_service.SetMaintenancePolicyRequest(), + project_id="project_id_value", + zone="zone_value", + cluster_id="cluster_id_value", + maintenance_policy=cluster_service.MaintenancePolicy( + window=cluster_service.MaintenanceWindow( + daily_maintenance_window=cluster_service.DailyMaintenanceWindow( + start_time="start_time_value" + ) + ) + ), + ) + + +def test_list_usable_subnetworks(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListUsableSubnetworksRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_usable_subnetworks), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListUsableSubnetworksResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_usable_subnetworks(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListUsableSubnetworksPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListUsableSubnetworksRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_usable_subnetworks), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListUsableSubnetworksResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_usable_subnetworks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListUsableSubnetworksAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_usable_subnetworks_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListUsableSubnetworksRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.list_usable_subnetworks), "__call__" + ) as call: + call.return_value = cluster_service.ListUsableSubnetworksResponse() + + client.list_usable_subnetworks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListUsableSubnetworksRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_usable_subnetworks), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListUsableSubnetworksResponse() + ) + + await client.list_usable_subnetworks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_usable_subnetworks_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_usable_subnetworks), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.ListUsableSubnetworksResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_usable_subnetworks(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +def test_list_usable_subnetworks_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_usable_subnetworks( + cluster_service.ListUsableSubnetworksRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_usable_subnetworks), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListUsableSubnetworksResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListUsableSubnetworksResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_usable_subnetworks(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_usable_subnetworks( + cluster_service.ListUsableSubnetworksRequest(), parent="parent_value", + ) + + +def test_list_usable_subnetworks_pager(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_usable_subnetworks), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + next_page_token="abc", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[], next_page_token="def", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[cluster_service.UsableSubnetwork(),], + next_page_token="ghi", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_usable_subnetworks(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, cluster_service.UsableSubnetwork) for i in results) + + +def test_list_usable_subnetworks_pages(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials,) + + # Mock the 
actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_usable_subnetworks), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + next_page_token="abc", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[], next_page_token="def", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[cluster_service.UsableSubnetwork(),], + next_page_token="ghi", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + ), + RuntimeError, + ) + pages = list(client.list_usable_subnetworks(request={}).pages) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_async_pager(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_usable_subnetworks), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + next_page_token="abc", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[], next_page_token="def", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[cluster_service.UsableSubnetwork(),], + next_page_token="ghi", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_usable_subnetworks(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, cluster_service.UsableSubnetwork) for i in responses) + + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_async_pages(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_usable_subnetworks), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + next_page_token="abc", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[], next_page_token="def", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[cluster_service.UsableSubnetwork(),], + next_page_token="ghi", + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + ), + RuntimeError, + ) + pages = [] + async for page in (await client.list_usable_subnetworks(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_list_locations(transport: str = "grpc"): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListLocationsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.ListLocationsResponse) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = ClusterManagerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cluster_service.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_locations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListLocationsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ListLocationsResponse) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_locations_field_headers(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListLocationsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_locations), "__call__") as call: + call.return_value = cluster_service.ListLocationsResponse() + + client.list_locations(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListLocationsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_locations), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListLocationsResponse() + ) + + await client.list_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_locations_flattened(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListLocationsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_locations(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +def test_list_locations_flattened_error(): + client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_locations( + cluster_service.ListLocationsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_locations_flattened_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_locations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListLocationsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cluster_service.ListLocationsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_locations(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_locations_flattened_error_async(): + client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_locations( + cluster_service.ListLocationsRequest(), parent="parent_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.ClusterManagerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterManagerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterManagerClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = ClusterManagerClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ClusterManagerGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.ClusterManagerGrpcTransport,) + + +def test_cluster_manager_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.ClusterManagerTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_cluster_manager_base_transport(): + # Instantiate the base transport. + transport = transports.ClusterManagerTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "list_clusters", + "get_cluster", + "create_cluster", + "update_cluster", + "update_node_pool", + "set_node_pool_autoscaling", + "set_logging_service", + "set_monitoring_service", + "set_addons_config", + "set_locations", + "update_master", + "set_master_auth", + "delete_cluster", + "list_operations", + "get_operation", + "cancel_operation", + "get_server_config", + "list_node_pools", + "get_node_pool", + "create_node_pool", + "delete_node_pool", + "rollback_node_pool_upgrade", + "set_node_pool_management", + "set_labels", + "set_legacy_abac", + "start_ip_rotation", + "complete_ip_rotation", + "set_node_pool_size", + "set_network_policy", + "set_maintenance_policy", + "list_usable_subnetworks", + "list_locations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_cluster_manager_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(auth, "load_credentials_from_file") as load_creds: + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.ClusterManagerTransport( + credentials_file="credentials.json", + ) 
+ load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ) + + +def test_cluster_manager_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + ClusterManagerClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",) + ) + + +def test_cluster_manager_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.ClusterManagerGrpcTransport(host="squid.clam.whelk") + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",) + ) + + +def test_cluster_manager_host_no_port(): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="container.googleapis.com" + ), + ) + assert client._transport._host == "container.googleapis.com:443" + + +def test_cluster_manager_host_with_port(): + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="container.googleapis.com:8000" + ), + ) + assert client._transport._host == "container.googleapis.com:8000" + + +def test_cluster_manager_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. 
+ callback = mock.MagicMock() + transport = transports.ClusterManagerGrpcTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +def test_cluster_manager_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.ClusterManagerGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_cluster_manager_grpc_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. 
+ mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.ClusterManagerGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_cluster_manager_grpc_asyncio_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. 
+ mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.ClusterManagerGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_cluster_manager_grpc_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.ClusterManagerGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_cluster_manager_grpc_asyncio_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.ClusterManagerGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + ) + assert transport.grpc_channel == mock_grpc_channel diff --git a/tests/unit/gapic/v1/test_cluster_manager_client_v1.py b/tests/unit/gapic/v1/test_cluster_manager_client_v1.py deleted file mode 100644 index 5468b7a0..00000000 --- a/tests/unit/gapic/v1/test_cluster_manager_client_v1.py +++ /dev/null @@ -1,1688 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import container_v1 -from google.cloud.container_v1 import enums -from google.cloud.container_v1.proto import cluster_service_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestClusterManagerClient(object): - def test_delete_cluster(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - 
with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - response = client.delete_cluster() - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.DeleteClusterRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - with pytest.raises(CustomException): - client.delete_cluster() - - def test_delete_node_pool(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - response = client.delete_node_pool() - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.DeleteNodePoolRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def 
test_delete_node_pool_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - with pytest.raises(CustomException): - client.delete_node_pool() - - def test_list_clusters(self): - # Setup Expected Response - expected_response = {} - expected_response = cluster_service_pb2.ListClustersResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - response = client.list_clusters() - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.ListClustersRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_clusters_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - with pytest.raises(CustomException): - client.list_clusters() - - def test_get_cluster(self): - # Setup Expected Response - name = "name3373707" - description = "description-1724546052" - initial_node_count = 1682564205 - logging_service = "loggingService-1700501035" - monitoring_service = "monitoringService1469270462" - network = "network1843485230" - cluster_ipv4_cidr = "clusterIpv4Cidr-141875831" - subnetwork = "subnetwork-1302785042" - enable_kubernetes_alpha = False - label_fingerprint = "labelFingerprint714995737" - self_link = "selfLink-1691268851" - zone = "zone3744684" - endpoint 
= "endpoint1741102485" - initial_cluster_version = "initialClusterVersion-276373352" - current_master_version = "currentMasterVersion-920953983" - current_node_version = "currentNodeVersion-407476063" - create_time = "createTime-493574096" - status_message = "statusMessage-239442758" - node_ipv4_cidr_size = 1181176815 - services_ipv4_cidr = "servicesIpv4Cidr1966438125" - current_node_count = 178977560 - expire_time = "expireTime-96179731" - location = "location1901043637" - enable_tpu = False - tpu_ipv4_cidr_block = "tpuIpv4CidrBlock1137906646" - expected_response = { - "name": name, - "description": description, - "initial_node_count": initial_node_count, - "logging_service": logging_service, - "monitoring_service": monitoring_service, - "network": network, - "cluster_ipv4_cidr": cluster_ipv4_cidr, - "subnetwork": subnetwork, - "enable_kubernetes_alpha": enable_kubernetes_alpha, - "label_fingerprint": label_fingerprint, - "self_link": self_link, - "zone": zone, - "endpoint": endpoint, - "initial_cluster_version": initial_cluster_version, - "current_master_version": current_master_version, - "current_node_version": current_node_version, - "create_time": create_time, - "status_message": status_message, - "node_ipv4_cidr_size": node_ipv4_cidr_size, - "services_ipv4_cidr": services_ipv4_cidr, - "current_node_count": current_node_count, - "expire_time": expire_time, - "location": location, - "enable_tpu": enable_tpu, - "tpu_ipv4_cidr_block": tpu_ipv4_cidr_block, - } - expected_response = cluster_service_pb2.Cluster(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - response = client.get_cluster() - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = 
cluster_service_pb2.GetClusterRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - with pytest.raises(CustomException): - client.get_cluster() - - def test_create_cluster(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - cluster = {} - - response = client.create_cluster(cluster) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.CreateClusterRequest(cluster=cluster) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as 
create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - cluster = {} - - with pytest.raises(CustomException): - client.create_cluster(cluster) - - def test_update_cluster(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - update = {} - - response = client.update_cluster(update) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.UpdateClusterRequest(update=update) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - update = {} - - with pytest.raises(CustomException): - client.update_cluster(update) - - def test_update_node_pool(self): - # Setup Expected Response - name = "name3373707" - 
zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - node_version = "nodeVersion1790136219" - image_type = "imageType-1442758754" - - response = client.update_node_pool(node_version, image_type) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.UpdateNodePoolRequest( - node_version=node_version, image_type=image_type - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_node_pool_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - node_version = "nodeVersion1790136219" - image_type = "imageType-1442758754" - - with pytest.raises(CustomException): - client.update_node_pool(node_version, image_type) - - def test_set_node_pool_autoscaling(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = 
"statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - autoscaling = {} - - response = client.set_node_pool_autoscaling(autoscaling) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetNodePoolAutoscalingRequest( - autoscaling=autoscaling - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_node_pool_autoscaling_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - autoscaling = {} - - with pytest.raises(CustomException): - client.set_node_pool_autoscaling(autoscaling) - - def test_set_logging_service(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - 
expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - logging_service = "loggingService-1700501035" - - response = client.set_logging_service(logging_service) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetLoggingServiceRequest( - logging_service=logging_service - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_logging_service_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - logging_service = "loggingService-1700501035" - - with pytest.raises(CustomException): - client.set_logging_service(logging_service) - - def test_set_monitoring_service(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, 
- "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - monitoring_service = "monitoringService1469270462" - - response = client.set_monitoring_service(monitoring_service) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetMonitoringServiceRequest( - monitoring_service=monitoring_service - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_monitoring_service_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - monitoring_service = "monitoringService1469270462" - - with pytest.raises(CustomException): - client.set_monitoring_service(monitoring_service) - - def test_set_addons_config(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = 
cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - addons_config = {} - - response = client.set_addons_config(addons_config) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetAddonsConfigRequest( - addons_config=addons_config - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_addons_config_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - addons_config = {} - - with pytest.raises(CustomException): - client.set_addons_config(addons_config) - - def test_set_locations(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - locations = [] - - response = client.set_locations(locations) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetLocationsRequest(locations=locations) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_locations_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - locations = [] - - with pytest.raises(CustomException): - client.set_locations(locations) - - def test_update_master(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - master_version = "masterVersion-2139460613" - - response = client.update_master(master_version) - assert expected_response == response - - assert len(channel.requests) == 1 - 
expected_request = cluster_service_pb2.UpdateMasterRequest( - master_version=master_version - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_master_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - master_version = "masterVersion-2139460613" - - with pytest.raises(CustomException): - client.update_master(master_version) - - def test_set_master_auth(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - action = enums.SetMasterAuthRequest.Action.UNKNOWN - update = {} - - response = client.set_master_auth(action, update) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetMasterAuthRequest( - action=action, update=update - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request 
- - def test_set_master_auth_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - action = enums.SetMasterAuthRequest.Action.UNKNOWN - update = {} - - with pytest.raises(CustomException): - client.set_master_auth(action, update) - - def test_list_operations(self): - # Setup Expected Response - expected_response = {} - expected_response = cluster_service_pb2.ListOperationsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - response = client.list_operations() - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.ListOperationsRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_operations_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - with pytest.raises(CustomException): - client.list_operations() - - def test_get_operation(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": 
zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - response = client.get_operation() - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.GetOperationRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_operation_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - with pytest.raises(CustomException): - client.get_operation() - - def test_cancel_operation(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - client.cancel_operation() - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.CancelOperationRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_cancel_operation_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - with 
pytest.raises(CustomException): - client.cancel_operation() - - def test_get_server_config(self): - # Setup Expected Response - default_cluster_version = "defaultClusterVersion111003029" - default_image_type = "defaultImageType-918225828" - expected_response = { - "default_cluster_version": default_cluster_version, - "default_image_type": default_image_type, - } - expected_response = cluster_service_pb2.ServerConfig(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - response = client.get_server_config() - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.GetServerConfigRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_server_config_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - with pytest.raises(CustomException): - client.get_server_config() - - def test_list_node_pools(self): - # Setup Expected Response - expected_response = {} - expected_response = cluster_service_pb2.ListNodePoolsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - response = client.list_node_pools() - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.ListNodePoolsRequest() 
- actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_node_pools_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - with pytest.raises(CustomException): - client.list_node_pools() - - def test_get_node_pool(self): - # Setup Expected Response - name = "name3373707" - initial_node_count = 1682564205 - self_link = "selfLink-1691268851" - version = "version351608024" - status_message = "statusMessage-239442758" - pod_ipv4_cidr_size = 1098768716 - expected_response = { - "name": name, - "initial_node_count": initial_node_count, - "self_link": self_link, - "version": version, - "status_message": status_message, - "pod_ipv4_cidr_size": pod_ipv4_cidr_size, - } - expected_response = cluster_service_pb2.NodePool(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - response = client.get_node_pool() - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.GetNodePoolRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_node_pool_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - with pytest.raises(CustomException): - client.get_node_pool() - - def test_create_node_pool(self): - # Setup Expected Response - 
name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - node_pool = {} - - response = client.create_node_pool(node_pool) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.CreateNodePoolRequest( - node_pool=node_pool - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_node_pool_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - node_pool = {} - - with pytest.raises(CustomException): - client.create_node_pool(node_pool) - - def test_rollback_node_pool_upgrade(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = 
"startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - response = client.rollback_node_pool_upgrade() - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.RollbackNodePoolUpgradeRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_rollback_node_pool_upgrade_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - with pytest.raises(CustomException): - client.rollback_node_pool_upgrade() - - def test_set_node_pool_management(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = 
cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - management = {} - - response = client.set_node_pool_management(management) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetNodePoolManagementRequest( - management=management - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_node_pool_management_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - management = {} - - with pytest.raises(CustomException): - client.set_node_pool_management(management) - - def test_set_labels(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - resource_labels = {} - label_fingerprint = "labelFingerprint714995737" - - response = client.set_labels(resource_labels, label_fingerprint) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetLabelsRequest( - resource_labels=resource_labels, label_fingerprint=label_fingerprint - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_labels_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - resource_labels = {} - label_fingerprint = "labelFingerprint714995737" - - with pytest.raises(CustomException): - client.set_labels(resource_labels, label_fingerprint) - - def test_set_legacy_abac(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
container_v1.ClusterManagerClient() - - # Setup Request - enabled = False - - response = client.set_legacy_abac(enabled) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetLegacyAbacRequest(enabled=enabled) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_legacy_abac_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - enabled = False - - with pytest.raises(CustomException): - client.set_legacy_abac(enabled) - - def test_start_i_p_rotation(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - response = client.start_i_p_rotation() - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.StartIPRotationRequest() - actual_request = channel.requests[0][1] - assert 
expected_request == actual_request - - def test_start_i_p_rotation_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - with pytest.raises(CustomException): - client.start_i_p_rotation() - - def test_complete_i_p_rotation(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - response = client.complete_i_p_rotation() - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.CompleteIPRotationRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_complete_i_p_rotation_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - with 
pytest.raises(CustomException): - client.complete_i_p_rotation() - - def test_set_node_pool_size(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - node_count = 1539922066 - - response = client.set_node_pool_size(node_count) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetNodePoolSizeRequest( - node_count=node_count - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_node_pool_size_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - node_count = 1539922066 - - with pytest.raises(CustomException): - client.set_node_pool_size(node_count) - - def test_set_network_policy(self): - # Setup Expected Response - name = "name3373707" - zone = "zone3744684" - detail = "detail-1335224239" - status_message = 
"statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - network_policy = {} - - response = client.set_network_policy(network_policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetNetworkPolicyRequest( - network_policy=network_policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_network_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - network_policy = {} - - with pytest.raises(CustomException): - client.set_network_policy(network_policy) - - def test_set_maintenance_policy(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - 
expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - maintenance_policy = {} - - response = client.set_maintenance_policy( - project_id, zone, cluster_id, maintenance_policy - ) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetMaintenancePolicyRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - maintenance_policy=maintenance_policy, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_maintenance_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - maintenance_policy = {} - - with pytest.raises(CustomException): - client.set_maintenance_policy( - project_id, zone, cluster_id, maintenance_policy - ) - - def test_list_usable_subnetworks(self): - # Setup Expected Response - next_page_token = "" - subnetworks_element = {} - subnetworks = [subnetworks_element] - expected_response = { - "next_page_token": next_page_token, - 
"subnetworks": subnetworks, - } - expected_response = cluster_service_pb2.ListUsableSubnetworksResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - paged_list_response = client.list_usable_subnetworks() - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.subnetworks[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.ListUsableSubnetworksRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_usable_subnetworks_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1.ClusterManagerClient() - - paged_list_response = client.list_usable_subnetworks() - with pytest.raises(CustomException): - list(paged_list_response) diff --git a/tests/unit/gapic/v1beta1/test_cluster_manager_client_v1beta1.py b/tests/unit/gapic/v1beta1/test_cluster_manager_client_v1beta1.py deleted file mode 100644 index 05939347..00000000 --- a/tests/unit/gapic/v1beta1/test_cluster_manager_client_v1beta1.py +++ /dev/null @@ -1,2087 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import container_v1beta1 -from google.cloud.container_v1beta1 import enums -from google.cloud.container_v1beta1.proto import cluster_service_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestClusterManagerClient(object): - def test_list_clusters(self): - # Setup Expected Response - expected_response = {} - expected_response = cluster_service_pb2.ListClustersResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - 
client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - - response = client.list_clusters(project_id, zone) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.ListClustersRequest( - project_id=project_id, zone=zone - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_clusters_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - - with pytest.raises(CustomException): - client.list_clusters(project_id, zone) - - def test_get_cluster(self): - # Setup Expected Response - name = "name3373707" - description = "description-1724546052" - initial_node_count = 1682564205 - logging_service = "loggingService-1700501035" - monitoring_service = "monitoringService1469270462" - network = "network1843485230" - cluster_ipv4_cidr = "clusterIpv4Cidr-141875831" - subnetwork = "subnetwork-1302785042" - enable_kubernetes_alpha = False - label_fingerprint = "labelFingerprint714995737" - private_cluster = True - master_ipv4_cidr_block = "masterIpv4CidrBlock-97940801" - self_link = "selfLink-1691268851" - zone_2 = "zone2-696322977" - endpoint = "endpoint1741102485" - initial_cluster_version = "initialClusterVersion-276373352" - current_master_version = "currentMasterVersion-920953983" - current_node_version = "currentNodeVersion-407476063" - create_time = "createTime-493574096" - status_message = "statusMessage-239442758" - node_ipv4_cidr_size = 1181176815 - services_ipv4_cidr = "servicesIpv4Cidr1966438125" - current_node_count = 178977560 - expire_time = "expireTime-96179731" - 
location = "location1901043637" - enable_tpu = False - tpu_ipv4_cidr_block = "tpuIpv4CidrBlock1137906646" - expected_response = { - "name": name, - "description": description, - "initial_node_count": initial_node_count, - "logging_service": logging_service, - "monitoring_service": monitoring_service, - "network": network, - "cluster_ipv4_cidr": cluster_ipv4_cidr, - "subnetwork": subnetwork, - "enable_kubernetes_alpha": enable_kubernetes_alpha, - "label_fingerprint": label_fingerprint, - "private_cluster": private_cluster, - "master_ipv4_cidr_block": master_ipv4_cidr_block, - "self_link": self_link, - "zone": zone_2, - "endpoint": endpoint, - "initial_cluster_version": initial_cluster_version, - "current_master_version": current_master_version, - "current_node_version": current_node_version, - "create_time": create_time, - "status_message": status_message, - "node_ipv4_cidr_size": node_ipv4_cidr_size, - "services_ipv4_cidr": services_ipv4_cidr, - "current_node_count": current_node_count, - "expire_time": expire_time, - "location": location, - "enable_tpu": enable_tpu, - "tpu_ipv4_cidr_block": tpu_ipv4_cidr_block, - } - expected_response = cluster_service_pb2.Cluster(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - - response = client.get_cluster(project_id, zone, cluster_id) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.GetClusterRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_cluster_exception(self): - # Mock 
the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - - with pytest.raises(CustomException): - client.get_cluster(project_id, zone, cluster_id) - - def test_create_cluster(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster = {} - - response = client.create_cluster(project_id, zone, cluster) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.CreateClusterRequest( - project_id=project_id, zone=zone, cluster=cluster - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_cluster_exception(self): - # Mock the API response - channel = 
ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster = {} - - with pytest.raises(CustomException): - client.create_cluster(project_id, zone, cluster) - - def test_update_cluster(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - update = {} - - response = client.update_cluster(project_id, zone, cluster_id, update) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.UpdateClusterRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id, update=update - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_cluster_exception(self): - # Mock the API response - channel = 
ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - update = {} - - with pytest.raises(CustomException): - client.update_cluster(project_id, zone, cluster_id, update) - - def test_update_node_pool(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = "nodePoolId1043384033" - node_version = "nodeVersion1790136219" - image_type = "imageType-1442758754" - - response = client.update_node_pool( - project_id, zone, cluster_id, node_pool_id, node_version, image_type - ) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.UpdateNodePoolRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - 
node_pool_id=node_pool_id, - node_version=node_version, - image_type=image_type, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_node_pool_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = "nodePoolId1043384033" - node_version = "nodeVersion1790136219" - image_type = "imageType-1442758754" - - with pytest.raises(CustomException): - client.update_node_pool( - project_id, zone, cluster_id, node_pool_id, node_version, image_type - ) - - def test_set_node_pool_autoscaling(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = 
"nodePoolId1043384033" - autoscaling = {} - - response = client.set_node_pool_autoscaling( - project_id, zone, cluster_id, node_pool_id, autoscaling - ) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetNodePoolAutoscalingRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - autoscaling=autoscaling, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_node_pool_autoscaling_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = "nodePoolId1043384033" - autoscaling = {} - - with pytest.raises(CustomException): - client.set_node_pool_autoscaling( - project_id, zone, cluster_id, node_pool_id, autoscaling - ) - - def test_set_logging_service(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - logging_service = "loggingService-1700501035" - - response = client.set_logging_service( - project_id, zone, cluster_id, logging_service - ) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetLoggingServiceRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - logging_service=logging_service, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_logging_service_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - logging_service = "loggingService-1700501035" - - with pytest.raises(CustomException): - client.set_logging_service(project_id, zone, cluster_id, logging_service) - - def test_set_monitoring_service(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": 
end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - monitoring_service = "monitoringService1469270462" - - response = client.set_monitoring_service( - project_id, zone, cluster_id, monitoring_service - ) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetMonitoringServiceRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - monitoring_service=monitoring_service, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_monitoring_service_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - monitoring_service = "monitoringService1469270462" - - with pytest.raises(CustomException): - client.set_monitoring_service( - project_id, zone, cluster_id, monitoring_service - ) - - def test_set_addons_config(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - 
"name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - addons_config = {} - - response = client.set_addons_config(project_id, zone, cluster_id, addons_config) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetAddonsConfigRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - addons_config=addons_config, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_addons_config_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - addons_config = {} - - with pytest.raises(CustomException): - client.set_addons_config(project_id, zone, cluster_id, addons_config) - - def test_set_locations(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - 
start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - locations = [] - - response = client.set_locations(project_id, zone, cluster_id, locations) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetLocationsRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id, locations=locations - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_locations_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - locations = [] - - with pytest.raises(CustomException): - client.set_locations(project_id, zone, cluster_id, locations) - - def test_update_master(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = 
"targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - master_version = "masterVersion-2139460613" - - response = client.update_master(project_id, zone, cluster_id, master_version) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.UpdateMasterRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - master_version=master_version, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_master_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - master_version = "masterVersion-2139460613" - - with pytest.raises(CustomException): - client.update_master(project_id, zone, cluster_id, master_version) - - def test_set_master_auth(self): - # Setup Expected Response - name = "name3373707" - zone_2 = 
"zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - action = enums.SetMasterAuthRequest.Action.UNKNOWN - update = {} - - response = client.set_master_auth(project_id, zone, cluster_id, action, update) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetMasterAuthRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - action=action, - update=update, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_master_auth_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - action = enums.SetMasterAuthRequest.Action.UNKNOWN - update = {} - - with 
pytest.raises(CustomException): - client.set_master_auth(project_id, zone, cluster_id, action, update) - - def test_delete_cluster(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - - response = client.delete_cluster(project_id, zone, cluster_id) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.DeleteClusterRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - - with 
pytest.raises(CustomException): - client.delete_cluster(project_id, zone, cluster_id) - - def test_list_operations(self): - # Setup Expected Response - expected_response = {} - expected_response = cluster_service_pb2.ListOperationsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - - response = client.list_operations(project_id, zone) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.ListOperationsRequest( - project_id=project_id, zone=zone - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_operations_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - - with pytest.raises(CustomException): - client.list_operations(project_id, zone) - - def test_get_operation(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - 
"start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - operation_id = "operationId-274116877" - - response = client.get_operation(project_id, zone, operation_id) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.GetOperationRequest( - project_id=project_id, zone=zone, operation_id=operation_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_operation_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - operation_id = "operationId-274116877" - - with pytest.raises(CustomException): - client.get_operation(project_id, zone, operation_id) - - def test_cancel_operation(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - operation_id = "operationId-274116877" - - client.cancel_operation(project_id, zone, operation_id) - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.CancelOperationRequest( - project_id=project_id, zone=zone, 
operation_id=operation_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_cancel_operation_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - operation_id = "operationId-274116877" - - with pytest.raises(CustomException): - client.cancel_operation(project_id, zone, operation_id) - - def test_get_server_config(self): - # Setup Expected Response - default_cluster_version = "defaultClusterVersion111003029" - default_image_type = "defaultImageType-918225828" - expected_response = { - "default_cluster_version": default_cluster_version, - "default_image_type": default_image_type, - } - expected_response = cluster_service_pb2.ServerConfig(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - - response = client.get_server_config(project_id, zone) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.GetServerConfigRequest( - project_id=project_id, zone=zone - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_server_config_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - - with pytest.raises(CustomException): - client.get_server_config(project_id, zone) - - def test_list_node_pools(self): - # Setup Expected Response - expected_response = {} - expected_response = cluster_service_pb2.ListNodePoolsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - - response = client.list_node_pools(project_id, zone, cluster_id) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.ListNodePoolsRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_node_pools_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - - with pytest.raises(CustomException): - client.list_node_pools(project_id, zone, cluster_id) - - def test_get_node_pool(self): - # Setup Expected Response - name = "name3373707" - initial_node_count = 1682564205 - self_link = "selfLink-1691268851" - version = "version351608024" - status_message = "statusMessage-239442758" - pod_ipv4_cidr_size = 1098768716 - expected_response = { - "name": name, - "initial_node_count": 
initial_node_count, - "self_link": self_link, - "version": version, - "status_message": status_message, - "pod_ipv4_cidr_size": pod_ipv4_cidr_size, - } - expected_response = cluster_service_pb2.NodePool(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = "nodePoolId1043384033" - - response = client.get_node_pool(project_id, zone, cluster_id, node_pool_id) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.GetNodePoolRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_node_pool_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = "nodePoolId1043384033" - - with pytest.raises(CustomException): - client.get_node_pool(project_id, zone, cluster_id, node_pool_id) - - def test_create_node_pool(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = 
"endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool = {} - - response = client.create_node_pool(project_id, zone, cluster_id, node_pool) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.CreateNodePoolRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id, node_pool=node_pool - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_node_pool_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool = {} - - with pytest.raises(CustomException): - client.create_node_pool(project_id, zone, cluster_id, node_pool) - - def test_delete_node_pool(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = 
"location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = "nodePoolId1043384033" - - response = client.delete_node_pool(project_id, zone, cluster_id, node_pool_id) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.DeleteNodePoolRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_node_pool_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = "nodePoolId1043384033" - - with pytest.raises(CustomException): - client.delete_node_pool(project_id, zone, cluster_id, node_pool_id) - - def test_rollback_node_pool_upgrade(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" 
- status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = "nodePoolId1043384033" - - response = client.rollback_node_pool_upgrade( - project_id, zone, cluster_id, node_pool_id - ) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.RollbackNodePoolUpgradeRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_rollback_node_pool_upgrade_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = "nodePoolId1043384033" - - with pytest.raises(CustomException): - client.rollback_node_pool_upgrade( - project_id, zone, 
cluster_id, node_pool_id - ) - - def test_set_node_pool_management(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = "nodePoolId1043384033" - management = {} - - response = client.set_node_pool_management( - project_id, zone, cluster_id, node_pool_id, management - ) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetNodePoolManagementRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - management=management, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_node_pool_management_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - 
project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = "nodePoolId1043384033" - management = {} - - with pytest.raises(CustomException): - client.set_node_pool_management( - project_id, zone, cluster_id, node_pool_id, management - ) - - def test_set_labels(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - resource_labels = {} - label_fingerprint = "labelFingerprint714995737" - - response = client.set_labels( - project_id, zone, cluster_id, resource_labels, label_fingerprint - ) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetLabelsRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - resource_labels=resource_labels, - label_fingerprint=label_fingerprint, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_labels_exception(self): - # Mock the API response - channel = 
ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - resource_labels = {} - label_fingerprint = "labelFingerprint714995737" - - with pytest.raises(CustomException): - client.set_labels( - project_id, zone, cluster_id, resource_labels, label_fingerprint - ) - - def test_set_legacy_abac(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - enabled = False - - response = client.set_legacy_abac(project_id, zone, cluster_id, enabled) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetLegacyAbacRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id, enabled=enabled - ) - actual_request = 
channel.requests[0][1] - assert expected_request == actual_request - - def test_set_legacy_abac_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - enabled = False - - with pytest.raises(CustomException): - client.set_legacy_abac(project_id, zone, cluster_id, enabled) - - def test_start_i_p_rotation(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - - response = client.start_i_p_rotation(project_id, zone, cluster_id) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.StartIPRotationRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id - ) - 
actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_start_i_p_rotation_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - - with pytest.raises(CustomException): - client.start_i_p_rotation(project_id, zone, cluster_id) - - def test_complete_i_p_rotation(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - - response = client.complete_i_p_rotation(project_id, zone, cluster_id) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.CompleteIPRotationRequest( - project_id=project_id, zone=zone, cluster_id=cluster_id - ) - 
actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_complete_i_p_rotation_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - - with pytest.raises(CustomException): - client.complete_i_p_rotation(project_id, zone, cluster_id) - - def test_set_node_pool_size(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = "nodePoolId1043384033" - node_count = 1539922066 - - response = client.set_node_pool_size( - project_id, zone, cluster_id, node_pool_id, node_count - ) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = 
cluster_service_pb2.SetNodePoolSizeRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - node_pool_id=node_pool_id, - node_count=node_count, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_node_pool_size_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - node_pool_id = "nodePoolId1043384033" - node_count = 1539922066 - - with pytest.raises(CustomException): - client.set_node_pool_size( - project_id, zone, cluster_id, node_pool_id, node_count - ) - - def test_set_network_policy(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - network_policy = {} 
- - response = client.set_network_policy( - project_id, zone, cluster_id, network_policy - ) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetNetworkPolicyRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - network_policy=network_policy, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_network_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - network_policy = {} - - with pytest.raises(CustomException): - client.set_network_policy(project_id, zone, cluster_id, network_policy) - - def test_set_maintenance_policy(self): - # Setup Expected Response - name = "name3373707" - zone_2 = "zone2-696322977" - detail = "detail-1335224239" - status_message = "statusMessage-239442758" - self_link = "selfLink-1691268851" - target_link = "targetLink-2084812312" - location = "location1901043637" - start_time = "startTime-1573145462" - end_time = "endTime1725551537" - expected_response = { - "name": name, - "zone": zone_2, - "detail": detail, - "status_message": status_message, - "self_link": self_link, - "target_link": target_link, - "location": location, - "start_time": start_time, - "end_time": end_time, - } - expected_response = cluster_service_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - 
project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - maintenance_policy = {} - - response = client.set_maintenance_policy( - project_id, zone, cluster_id, maintenance_policy - ) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.SetMaintenancePolicyRequest( - project_id=project_id, - zone=zone, - cluster_id=cluster_id, - maintenance_policy=maintenance_policy, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_maintenance_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - project_id = "projectId-1969970175" - zone = "zone3744684" - cluster_id = "clusterId240280960" - maintenance_policy = {} - - with pytest.raises(CustomException): - client.set_maintenance_policy( - project_id, zone, cluster_id, maintenance_policy - ) - - def test_list_usable_subnetworks(self): - # Setup Expected Response - next_page_token = "" - subnetworks_element = {} - subnetworks = [subnetworks_element] - expected_response = { - "next_page_token": next_page_token, - "subnetworks": subnetworks, - } - expected_response = cluster_service_pb2.ListUsableSubnetworksResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - parent = "parent-995424086" - - paged_list_response = client.list_usable_subnetworks(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert 
expected_response.subnetworks[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.ListUsableSubnetworksRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_usable_subnetworks_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - parent = "parent-995424086" - - paged_list_response = client.list_usable_subnetworks(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_list_locations(self): - # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - expected_response = cluster_service_pb2.ListLocationsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup Request - parent = "parent-995424086" - - response = client.list_locations(parent) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cluster_service_pb2.ListLocationsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_locations_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = container_v1beta1.ClusterManagerClient() - - # Setup request - parent = "parent-995424086" - - with 
pytest.raises(CustomException): - client.list_locations(parent)