diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py index 8a17b892..48eb9a18 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py @@ -30,7 +30,6 @@ _transport_registry["grpc"] = AutoscalingPolicyServiceGrpcTransport _transport_registry["grpc_asyncio"] = AutoscalingPolicyServiceGrpcAsyncIOTransport - __all__ = ( "AutoscalingPolicyServiceTransport", "AutoscalingPolicyServiceGrpcTransport", diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py index 2d2e2746..736d0870 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py @@ -148,6 +148,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -166,6 +170,10 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._stubs = {} # type: Dict[str, Callable] @@ -192,7 +200,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optionsl[str]): The host for the channel to use. + address (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py index bd9d91a0..dc52d94d 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py @@ -193,6 +193,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -211,6 +215,10 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) # Run the base constructor. 
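The `options` list these hunks add is gRPC's standard channel-arguments mechanism: a value of -1 lifts the library's message-size caps (the receive side is limited to 4 MiB by default), which large Dataproc responses can exceed. A minimal sketch of the same two settings applied to a plain gRPC channel, independent of these transports — the localhost target is illustrative:

import grpc

# -1 disables the cap; gRPC otherwise rejects oversized messages
# (receives over the default 4 MiB limit fail with RESOURCE_EXHAUSTED).
unlimited_message_options = [
    ("grpc.max_send_message_length", -1),
    ("grpc.max_receive_message_length", -1),
]

# Hypothetical target address; any stub built on this channel can now
# exchange arbitrarily large request and response messages.
channel = grpc.insecure_channel("localhost:50051", options=unlimited_message_options)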
diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py index 9aa597b6..a487348a 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py @@ -28,7 +28,6 @@ _transport_registry["grpc"] = ClusterControllerGrpcTransport _transport_registry["grpc_asyncio"] = ClusterControllerGrpcAsyncIOTransport - __all__ = ( "ClusterControllerTransport", "ClusterControllerGrpcTransport", diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py index 7c8b83c2..ae1b8d78 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py @@ -149,6 +149,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -167,9 +171,14 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._stubs = {} # type: Dict[str, Callable] + self._operations_client = None # Run the base constructor. super().__init__( @@ -193,7 +202,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optionsl[str]): The host for the channel to use. + address (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -240,13 +249,11 @@ def operations_client(self) -> operations_v1.OperationsClient: client. """ # Sanity check: Only create a new client if we do not already have one. - if "operations_client" not in self.__dict__: - self.__dict__["operations_client"] = operations_v1.OperationsClient( - self.grpc_channel - ) + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) # Return the client from cache. - return self.__dict__["operations_client"] + return self._operations_client @property def create_cluster( diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py index 96c998f6..b3b50cf4 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py @@ -194,6 +194,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -212,6 +216,10 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) # Run the base constructor. 
@@ -225,6 +233,7 @@ def __init__( ) self._stubs = {} + self._operations_client = None @property def grpc_channel(self) -> aio.Channel: @@ -244,13 +253,13 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: client. """ # Sanity check: Only create a new client if we do not already have one. - if "operations_client" not in self.__dict__: - self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( self.grpc_channel ) # Return the client from cache. - return self.__dict__["operations_client"] + return self._operations_client @property def create_cluster( diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py b/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py index a3d68663..d28c850a 100644 --- a/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py +++ b/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py @@ -28,7 +28,6 @@ _transport_registry["grpc"] = JobControllerGrpcTransport _transport_registry["grpc_asyncio"] = JobControllerGrpcAsyncIOTransport - __all__ = ( "JobControllerTransport", "JobControllerGrpcTransport", diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py index 504c8d4d..5802abf7 100644 --- a/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py @@ -149,6 +149,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -167,9 +171,14 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._stubs = {} # type: Dict[str, Callable] + self._operations_client = None # Run the base constructor. super().__init__( @@ -193,7 +202,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optionsl[str]): The host for the channel to use. + address (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -240,13 +249,11 @@ def operations_client(self) -> operations_v1.OperationsClient: client. """ # Sanity check: Only create a new client if we do not already have one. - if "operations_client" not in self.__dict__: - self.__dict__["operations_client"] = operations_v1.OperationsClient( - self.grpc_channel - ) + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) # Return the client from cache. 
- return self.__dict__["operations_client"] + return self._operations_client @property def submit_job(self) -> Callable[[jobs.SubmitJobRequest], jobs.Job]: diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py index 6c60c089..04011df0 100644 --- a/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py @@ -194,6 +194,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -212,6 +216,10 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) # Run the base constructor. @@ -225,6 +233,7 @@ def __init__( ) self._stubs = {} + self._operations_client = None @property def grpc_channel(self) -> aio.Channel: @@ -244,13 +253,13 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: client. """ # Sanity check: Only create a new client if we do not already have one. - if "operations_client" not in self.__dict__: - self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( self.grpc_channel ) # Return the client from cache. - return self.__dict__["operations_client"] + return self._operations_client @property def submit_job(self) -> Callable[[jobs.SubmitJobRequest], Awaitable[jobs.Job]]: diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py index eb32b364..ac44e1f0 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py @@ -30,7 +30,6 @@ _transport_registry["grpc"] = WorkflowTemplateServiceGrpcTransport _transport_registry["grpc_asyncio"] = WorkflowTemplateServiceGrpcAsyncIOTransport - __all__ = ( "WorkflowTemplateServiceTransport", "WorkflowTemplateServiceGrpcTransport", diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py index 36e8cb35..98d84293 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py @@ -150,6 +150,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -168,9 +172,14 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._stubs = {} # type: Dict[str, Callable] + self._operations_client = None # Run the base constructor. 
super().__init__( @@ -194,7 +203,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optionsl[str]): The host for the channel to use. + address (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -241,13 +250,11 @@ def operations_client(self) -> operations_v1.OperationsClient: client. """ # Sanity check: Only create a new client if we do not already have one. - if "operations_client" not in self.__dict__: - self.__dict__["operations_client"] = operations_v1.OperationsClient( - self.grpc_channel - ) + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) # Return the client from cache. - return self.__dict__["operations_client"] + return self._operations_client @property def create_workflow_template( diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py index c69eaf0f..1024ab1b 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py @@ -195,6 +195,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -213,6 +217,10 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) # Run the base constructor. @@ -226,6 +234,7 @@ def __init__( ) self._stubs = {} + self._operations_client = None @property def grpc_channel(self) -> aio.Channel: @@ -245,13 +254,13 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: client. """ # Sanity check: Only create a new client if we do not already have one. - if "operations_client" not in self.__dict__: - self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( self.grpc_channel ) # Return the client from cache. 
- return self.__dict__["operations_client"] + return self._operations_client @property def create_workflow_template( diff --git a/google/cloud/dataproc_v1/types/__init__.py b/google/cloud/dataproc_v1/types/__init__.py index bfcbf982..3c02d690 100644 --- a/google/cloud/dataproc_v1/types/__init__.py +++ b/google/cloud/dataproc_v1/types/__init__.py @@ -109,7 +109,6 @@ DeleteWorkflowTemplateRequest, ) - __all__ = ( "AutoscalingPolicy", "BasicAutoscalingAlgorithm", @@ -121,6 +120,7 @@ "DeleteAutoscalingPolicyRequest", "ListAutoscalingPoliciesRequest", "ListAutoscalingPoliciesResponse", + "Component", "Cluster", "ClusterConfig", "EndpointConfig", diff --git a/google/cloud/dataproc_v1beta2/proto/clusters.proto b/google/cloud/dataproc_v1beta2/proto/clusters.proto index 49e32305..8af436e0 100644 --- a/google/cloud/dataproc_v1beta2/proto/clusters.proto +++ b/google/cloud/dataproc_v1beta2/proto/clusters.proto @@ -30,17 +30,27 @@ option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta option java_multiple_files = true; option java_outer_classname = "ClustersProto"; option java_package = "com.google.cloud.dataproc.v1beta2"; +option (google.api.resource_definition) = { + type: "container.googleapis.com/Cluster" + pattern: "projects/{project}/locations/{location}/clusters/{cluster}" +}; +option (google.api.resource_definition) = { + type: "metastore.googleapis.com/Service" + pattern: "projects/{project}/locations/{location}/services/{service}" +}; // The ClusterControllerService provides methods to manage clusters // of Compute Engine instances. service ClusterController { option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; // Creates a cluster in a project. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). - rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { + rpc CreateCluster(CreateClusterRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1beta2/projects/{project_id}/regions/{region}/clusters" body: "cluster" @@ -55,12 +65,14 @@ service ClusterController { // Updates a cluster in a project. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). - rpc UpdateCluster(UpdateClusterRequest) returns (google.longrunning.Operation) { + rpc UpdateCluster(UpdateClusterRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { patch: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}" body: "cluster" }; - option (google.api.method_signature) = "project_id, region, cluster_name, cluster, update_mask"; + option (google.api.method_signature) = + "project_id, region, cluster_name, cluster, update_mask"; option (google.longrunning.operation_info) = { response_type: "Cluster" metadata_type: "google.cloud.dataproc.v1beta2.ClusterOperationMetadata" @@ -70,7 +82,8 @@ service ClusterController { // Deletes a cluster in a project. 
The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). - rpc DeleteCluster(DeleteClusterRequest) returns (google.longrunning.Operation) { + rpc DeleteCluster(DeleteClusterRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { delete: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}" }; @@ -105,7 +118,8 @@ service ClusterController { // [Operation.response][google.longrunning.Operation.response] // contains // [Empty][google.protobuf.Empty]. - rpc DiagnoseCluster(DiagnoseClusterRequest) returns (google.longrunning.Operation) { + rpc DiagnoseCluster(DiagnoseClusterRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose" body: "*" @@ -145,7 +159,8 @@ message Cluster { ClusterStatus status = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The previous cluster status. - repeated ClusterStatus status_history = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated ClusterStatus status_history = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. A cluster UUID (Unique Universal Identifier). Dataproc // generates this value when it creates the cluster. @@ -171,38 +186,41 @@ message ClusterConfig { // bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). string config_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; - // Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, - // such as Spark and MapReduce history files. - // If you do not specify a temp bucket, - // Dataproc will determine a Cloud Storage location (US, - // ASIA, or EU) for your cluster's temp bucket according to the - // Compute Engine zone where your cluster is deployed, and then create - // and manage this project-level, per-location bucket. The default bucket has - // a TTL of 90 days, but you can use any TTL (or none) if you specify a - // bucket. + // Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs + // data, such as Spark and MapReduce history files. If you do not specify a + // temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or + // EU) for your cluster's temp bucket according to the Compute Engine zone + // where your cluster is deployed, and then create and manage this + // project-level, per-location bucket. The default bucket has a TTL of 90 + // days, but you can use any TTL (or none) if you specify a bucket. string temp_bucket = 2 [(google.api.field_behavior) = OPTIONAL]; // Optional. The shared Compute Engine config settings for // all instances in a cluster. - GceClusterConfig gce_cluster_config = 8 [(google.api.field_behavior) = OPTIONAL]; + GceClusterConfig gce_cluster_config = 8 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The Compute Engine config settings for // the master instance in a cluster. - InstanceGroupConfig master_config = 9 [(google.api.field_behavior) = OPTIONAL]; + InstanceGroupConfig master_config = 9 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The Compute Engine config settings for // worker instances in a cluster. - InstanceGroupConfig worker_config = 10 [(google.api.field_behavior) = OPTIONAL]; + InstanceGroupConfig worker_config = 10 + [(google.api.field_behavior) = OPTIONAL]; // Optional. 
The Compute Engine config settings for // additional worker instances in a cluster. - InstanceGroupConfig secondary_worker_config = 12 [(google.api.field_behavior) = OPTIONAL]; + InstanceGroupConfig secondary_worker_config = 12 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The config settings for software inside the cluster. SoftwareConfig software_config = 13 [(google.api.field_behavior) = OPTIONAL]; // Optional. The config setting for auto delete cluster schedule. - LifecycleConfig lifecycle_config = 14 [(google.api.field_behavior) = OPTIONAL]; + LifecycleConfig lifecycle_config = 14 + [(google.api.field_behavior) = OPTIONAL]; // Optional. Commands to execute on each node after config is // completed. By default, executables are run on master and all worker nodes. @@ -217,14 +235,17 @@ message ClusterConfig { // else // ... worker specific actions ... // fi - repeated NodeInitializationAction initialization_actions = 11 [(google.api.field_behavior) = OPTIONAL]; + repeated NodeInitializationAction initialization_actions = 11 + [(google.api.field_behavior) = OPTIONAL]; // Optional. Encryption settings for the cluster. - EncryptionConfig encryption_config = 15 [(google.api.field_behavior) = OPTIONAL]; + EncryptionConfig encryption_config = 15 + [(google.api.field_behavior) = OPTIONAL]; // Optional. Autoscaling config for the policy associated with the cluster. // Cluster does not autoscale if this field is unset. - AutoscalingConfig autoscaling_config = 16 [(google.api.field_behavior) = OPTIONAL]; + AutoscalingConfig autoscaling_config = 16 + [(google.api.field_behavior) = OPTIONAL]; // Optional. Port/endpoint configuration for this cluster EndpointConfig endpoint_config = 17 [(google.api.field_behavior) = OPTIONAL]; @@ -232,11 +253,12 @@ message ClusterConfig { // Optional. Security related configuration. SecurityConfig security_config = 18 [(google.api.field_behavior) = OPTIONAL]; - // Optional. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. - // Setting this is considered mutually exclusive with Compute Engine-based - // options such as `gce_cluster_config`, `master_config`, `worker_config`, - // `secondary_worker_config`, and `autoscaling_config`. - GkeClusterConfig gke_cluster_config = 19 [(google.api.field_behavior) = OPTIONAL]; + // Optional. The Kubernetes Engine config for Dataproc clusters deployed to + // Kubernetes. Setting this is considered mutually exclusive with Compute + // Engine-based options such as `gce_cluster_config`, `master_config`, + // `worker_config`, `secondary_worker_config`, and `autoscaling_config`. + GkeClusterConfig gke_cluster_config = 19 + [(google.api.field_behavior) = OPTIONAL]; } // The GKE config for this cluster. @@ -257,14 +279,16 @@ message GkeClusterConfig { } // Optional. A target for the deployment. - NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(google.api.field_behavior) = OPTIONAL]; + NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 + [(google.api.field_behavior) = OPTIONAL]; } // Endpoint config for this cluster message EndpointConfig { // Output only. The map of port descriptions to URLs. Will only be populated // if enable_http_port_access is true. - map<string, string> http_ports = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + map<string, string> http_ports = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Optional. If true, enable http access to specific ports on the cluster // from external sources. Defaults to false. 
@@ -367,7 +391,8 @@ message GceClusterConfig { // * https://www.googleapis.com/auth/bigtable.admin.table // * https://www.googleapis.com/auth/bigtable.data // * https://www.googleapis.com/auth/devstorage.full_control - repeated string service_account_scopes = 3 [(google.api.field_behavior) = OPTIONAL]; + repeated string service_account_scopes = 3 + [(google.api.field_behavior) = OPTIONAL]; // The Compute Engine tags to add to all instances (see [Tagging // instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). @@ -379,7 +404,8 @@ map<string, string> metadata = 5; // Optional. Reservation Affinity for consuming Zonal reservation. - ReservationAffinity reservation_affinity = 11 [(google.api.field_behavior) = OPTIONAL]; + ReservationAffinity reservation_affinity = 11 + [(google.api.field_behavior) = OPTIONAL]; } // The config settings for Compute Engine resources in @@ -412,7 +438,8 @@ message InstanceGroupConfig { // Output only. The list of instance names. Dataproc derives the names // from `cluster_name`, `num_instances`, and the instance group. - repeated string instance_names = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated string instance_names = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Optional. The Compute Engine image resource used for cluster instances. // @@ -468,11 +495,13 @@ message InstanceGroupConfig { // Output only. The config for Compute Engine Instance Group // Manager that manages this group. // This is only used for preemptible instance groups. - ManagedGroupConfig managed_group_config = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + ManagedGroupConfig managed_group_config = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Optional. The Compute Engine accelerator configuration for these // instances. - repeated AcceleratorConfig accelerators = 8 [(google.api.field_behavior) = OPTIONAL]; + repeated AcceleratorConfig accelerators = 8 + [(google.api.field_behavior) = OPTIONAL]; // Specifies the minimum cpu platform for the Instance Group. // See [Dataproc -> Minimum CPU @@ -487,7 +516,8 @@ message ManagedGroupConfig { string instance_template_name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The name of the Instance Group Manager for this group. - string instance_group_manager_name = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + string instance_group_manager_name = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; } // Specifies the type and number of accelerator cards attached to the instances @@ -541,27 +571,32 @@ message LifecycleConfig { // deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON // representation of // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json). - google.protobuf.Duration idle_delete_ttl = 1 [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Duration idle_delete_ttl = 1 + [(google.api.field_behavior) = OPTIONAL]; // Either the exact time the cluster should be deleted at or // the cluster maximum age. oneof ttl { - // Optional. The time when cluster will be auto-deleted. (see JSON representation of + // Optional. The time when cluster will be auto-deleted. (see JSON + // representation of // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Timestamp auto_delete_time = 2 [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Timestamp auto_delete_time = 2 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The lifetime duration of cluster. 
The cluster will be // auto-deleted at the end of this period. Minimum value is 10 minutes; // maximum value is 14 days (see JSON representation of // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Duration auto_delete_ttl = 3 [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Duration auto_delete_ttl = 3 + [(google.api.field_behavior) = OPTIONAL]; } // Output only. The time when cluster became idle (most recent job finished) // and became eligible for deletion due to idleness (see JSON representation // of // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Timestamp idle_start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp idle_start_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; } // Security related configuration, including encryption, Kerberos, etc. @@ -572,13 +607,14 @@ message SecurityConfig { // Specifies Kerberos related configuration. message KerberosConfig { - // Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set - // this field to true to enable Kerberos on a cluster. + // Optional. Flag to indicate whether to Kerberize the cluster (default: + // false). Set this field to true to enable Kerberos on a cluster. bool enable_kerberos = 1 [(google.api.field_behavior) = OPTIONAL]; // Required. The Cloud Storage URI of a KMS encrypted file containing the root // principal password. - string root_principal_password_uri = 2 [(google.api.field_behavior) = REQUIRED]; + string root_principal_password_uri = 2 + [(google.api.field_behavior) = REQUIRED]; // Required. The uri of the KMS key used to encrypt various sensitive // files. @@ -619,12 +655,14 @@ message KerberosConfig { // Optional. The admin server (IP or hostname) for the remote trusted realm in // a cross realm trust relationship. - string cross_realm_trust_admin_server = 11 [(google.api.field_behavior) = OPTIONAL]; + string cross_realm_trust_admin_server = 11 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The Cloud Storage URI of a KMS encrypted file containing the // shared password between the on-cluster Kerberos realm and the remote // trusted realm, in a cross realm trust relationship. - string cross_realm_trust_shared_password_uri = 12 [(google.api.field_behavior) = OPTIONAL]; + string cross_realm_trust_shared_password_uri = 12 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The Cloud Storage URI of a KMS encrypted file containing the // master key of the KDC database. @@ -653,7 +691,8 @@ message NodeInitializationAction { // Cluster creation fails with an explanatory error message (the // name of the executable that caused the error and the exceeded timeout // period) if the executable is not completed at end of the timeout period. - google.protobuf.Duration execution_timeout = 2 [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Duration execution_timeout = 2 + [(google.api.field_behavior) = OPTIONAL]; } // The status of a cluster and its instances. @@ -715,7 +754,8 @@ message ClusterStatus { // Output only. Time when this state was entered (see JSON representation of // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Timestamp state_start_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp state_start_time = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. 
Additional state information that includes // status reported by the agent. @@ -782,10 +822,11 @@ message CreateClusterRequest { Cluster cluster = 2 [(google.api.field_behavior) = REQUIRED]; // Optional. A unique id used to identify the request. If the server - // receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same - // id, then the second request will be ignored and the - // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend - // is returned. + // receives two + // [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] + // requests with the same id, then the second request will be ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. // // It is recommended to always set this value to a // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). @@ -819,7 +860,8 @@ message UpdateClusterRequest { // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). // // Only supported on Dataproc image versions 1.2 and higher. - google.protobuf.Duration graceful_decommission_timeout = 6 [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Duration graceful_decommission_timeout = 6 + [(google.api.field_behavior) = OPTIONAL]; // Required. Specifies the path, relative to `Cluster`, of // the field to update. For example, to change the number of workers @@ -881,13 +923,15 @@ message UpdateClusterRequest { // autoscaling policies // // - google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = REQUIRED]; + google.protobuf.FieldMask update_mask = 4 + [(google.api.field_behavior) = REQUIRED]; // Optional. A unique id used to identify the request. If the server - // receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same - // id, then the second request will be ignored and the - // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the - // backend is returned. + // receives two + // [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] + // requests with the same id, then the second request will be ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. // // It is recommended to always set this value to a // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). @@ -914,10 +958,11 @@ message DeleteClusterRequest { string cluster_uuid = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. A unique id used to identify the request. If the server - // receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests with the same - // id, then the second request will be ignored and the - // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the - // backend is returned. + // receives two + // [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] + // requests with the same id, then the second request will be ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. // // It is recommended to always set this value to a // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). 
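The reflowed comments in this proto spell out the `request_id` contract: if the server receives two requests with the same id, the second is ignored and the first long-running Operation is returned. A hedged sketch of honoring that from the generated v1beta2 client — the project, region, and cluster values are placeholders, and the dict-style request mirrors the proto fields above:

import uuid

from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.ClusterControllerClient()

# Reuse this exact value on retries: the server treats a repeated
# request_id as the same request and returns the original Operation.
request_id = str(uuid.uuid4())

operation = client.create_cluster(
    request={
        "project_id": "my-project",       # illustrative values
        "region": "us-central1",
        "cluster": {"cluster_name": "my-cluster"},
        "request_id": request_id,
    }
)
cluster = operation.result()  # block until the long-running create finishes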
diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py index 8a17b892..48eb9a18 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py @@ -30,7 +30,6 @@ _transport_registry["grpc"] = AutoscalingPolicyServiceGrpcTransport _transport_registry["grpc_asyncio"] = AutoscalingPolicyServiceGrpcAsyncIOTransport - __all__ = ( "AutoscalingPolicyServiceTransport", "AutoscalingPolicyServiceGrpcTransport", diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py index ace75125..01896af1 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py @@ -148,6 +148,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -166,6 +170,10 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._stubs = {} # type: Dict[str, Callable] @@ -192,7 +200,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optionsl[str]): The host for the channel to use. + address (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py index f5a39178..cf4811e1 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py @@ -193,6 +193,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -211,6 +215,10 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) # Run the base constructor. 
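The hunks that follow add `cluster_path` and `parse_cluster_path` helpers to the v1beta2 clients; judging from the implementation they introduce, the pair round-trips a cluster resource name as below (names are illustrative):

from google.cloud.dataproc_v1beta2 import ClusterControllerClient

# Build the fully-qualified resource name from its segments...
path = ClusterControllerClient.cluster_path("my-project", "us-central1", "my-cluster")
# path == "projects/my-project/locations/us-central1/clusters/my-cluster"

# ...and recover the segments from a name (an empty dict if it doesn't match).
segments = ClusterControllerClient.parse_cluster_path(path)
# segments == {"project": "my-project", "location": "us-central1", "cluster": "my-cluster"}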
diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py index 818d4287..9ed3eff3 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py @@ -51,6 +51,9 @@ class ClusterControllerAsyncClient: DEFAULT_ENDPOINT = ClusterControllerClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = ClusterControllerClient.DEFAULT_MTLS_ENDPOINT + cluster_path = staticmethod(ClusterControllerClient.cluster_path) + parse_cluster_path = staticmethod(ClusterControllerClient.parse_cluster_path) + common_billing_account_path = staticmethod( ClusterControllerClient.common_billing_account_path ) diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py index f99564b7..4be2492d 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py @@ -148,6 +148,22 @@ def transport(self) -> ClusterControllerTransport: """ return self._transport + @staticmethod + def cluster_path(project: str, location: str, cluster: str,) -> str: + """Return a fully-qualified cluster string.""" + return "projects/{project}/locations/{location}/clusters/{cluster}".format( + project=project, location=location, cluster=cluster, + ) + + @staticmethod + def parse_cluster_path(path: str) -> Dict[str, str]: + """Parse a cluster path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/clusters/(?P<cluster>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Return a fully-qualified billing_account string.""" diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py index 9aa597b6..a487348a 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py @@ -28,7 +28,6 @@ _transport_registry["grpc"] = ClusterControllerGrpcTransport _transport_registry["grpc_asyncio"] = ClusterControllerGrpcAsyncIOTransport - __all__ = ( "ClusterControllerTransport", "ClusterControllerGrpcTransport", diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py index c8b16361..2af74b62 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py @@ -149,6 +149,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -167,9 +171,14 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._stubs = {} # type: Dict[str, Callable] + self._operations_client = None # Run the base constructor. 
super().__init__( @@ -193,7 +202,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optionsl[str]): The host for the channel to use. + address (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -240,13 +249,11 @@ def operations_client(self) -> operations_v1.OperationsClient: client. """ # Sanity check: Only create a new client if we do not already have one. - if "operations_client" not in self.__dict__: - self.__dict__["operations_client"] = operations_v1.OperationsClient( - self.grpc_channel - ) + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) # Return the client from cache. - return self.__dict__["operations_client"] + return self._operations_client @property def create_cluster( diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py index 0f17284b..186cc414 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py @@ -194,6 +194,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -212,6 +216,10 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) # Run the base constructor. @@ -225,6 +233,7 @@ def __init__( ) self._stubs = {} + self._operations_client = None @property def grpc_channel(self) -> aio.Channel: @@ -244,13 +253,13 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: client. """ # Sanity check: Only create a new client if we do not already have one. - if "operations_client" not in self.__dict__: - self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( self.grpc_channel ) # Return the client from cache. 
- return self.__dict__["operations_client"] + return self._operations_client @property def create_cluster( diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py b/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py index a3d68663..d28c850a 100644 --- a/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py +++ b/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py @@ -28,7 +28,6 @@ _transport_registry["grpc"] = JobControllerGrpcTransport _transport_registry["grpc_asyncio"] = JobControllerGrpcAsyncIOTransport - __all__ = ( "JobControllerTransport", "JobControllerGrpcTransport", diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py index 800181a1..4eb0020e 100644 --- a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py @@ -149,6 +149,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -167,9 +171,14 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._stubs = {} # type: Dict[str, Callable] + self._operations_client = None # Run the base constructor. super().__init__( @@ -193,7 +202,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optionsl[str]): The host for the channel to use. + address (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -240,13 +249,11 @@ def operations_client(self) -> operations_v1.OperationsClient: client. """ # Sanity check: Only create a new client if we do not already have one. - if "operations_client" not in self.__dict__: - self.__dict__["operations_client"] = operations_v1.OperationsClient( - self.grpc_channel - ) + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) # Return the client from cache. 
- return self.__dict__["operations_client"] + return self._operations_client @property def submit_job(self) -> Callable[[jobs.SubmitJobRequest], jobs.Job]: diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py index 5d23c945..1be3cb35 100644 --- a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py @@ -194,6 +194,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -212,6 +216,10 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) # Run the base constructor. @@ -225,6 +233,7 @@ def __init__( ) self._stubs = {} + self._operations_client = None @property def grpc_channel(self) -> aio.Channel: @@ -244,13 +253,13 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: client. """ # Sanity check: Only create a new client if we do not already have one. - if "operations_client" not in self.__dict__: - self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( self.grpc_channel ) # Return the client from cache. - return self.__dict__["operations_client"] + return self._operations_client @property def submit_job(self) -> Callable[[jobs.SubmitJobRequest], Awaitable[jobs.Job]]: diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py index 346e6875..71993784 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py @@ -51,6 +51,8 @@ class WorkflowTemplateServiceAsyncClient: DEFAULT_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_MTLS_ENDPOINT + cluster_path = staticmethod(WorkflowTemplateServiceClient.cluster_path) + parse_cluster_path = staticmethod(WorkflowTemplateServiceClient.parse_cluster_path) workflow_template_path = staticmethod( WorkflowTemplateServiceClient.workflow_template_path ) diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py index 0584fd77..5c473496 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py @@ -148,6 +148,22 @@ def transport(self) -> WorkflowTemplateServiceTransport: """ return self._transport + @staticmethod + def cluster_path(project: str, location: str, cluster: str,) -> str: + """Return a fully-qualified cluster string.""" + return "projects/{project}/locations/{location}/clusters/{cluster}".format( + project=project, location=location, cluster=cluster, + ) + + @staticmethod + def parse_cluster_path(path: str) -> Dict[str, str]: + """Parse a cluster path into its 
component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/clusters/(?P<cluster>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def workflow_template_path( project: str, region: str, workflow_template: str, diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py index eb32b364..ac44e1f0 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py @@ -30,7 +30,6 @@ _transport_registry["grpc"] = WorkflowTemplateServiceGrpcTransport _transport_registry["grpc_asyncio"] = WorkflowTemplateServiceGrpcAsyncIOTransport - __all__ = ( "WorkflowTemplateServiceTransport", "WorkflowTemplateServiceGrpcTransport", diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py index b78dc810..8de2cce0 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py @@ -150,6 +150,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -168,9 +172,14 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._stubs = {} # type: Dict[str, Callable] + self._operations_client = None # Run the base constructor. super().__init__( @@ -194,7 +203,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optionsl[str]): The host for the channel to use. + address (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -241,13 +250,11 @@ def operations_client(self) -> operations_v1.OperationsClient: client. """ # Sanity check: Only create a new client if we do not already have one. - if "operations_client" not in self.__dict__: - self.__dict__["operations_client"] = operations_v1.OperationsClient( - self.grpc_channel - ) + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) # Return the client from cache. 
- return self.__dict__["operations_client"] + return self._operations_client @property def create_workflow_template( diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py index 11921398..81ffd56b 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py @@ -195,6 +195,10 @@ def __init__( ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) self._ssl_channel_credentials = ssl_credentials else: @@ -213,6 +217,10 @@ def __init__( ssl_credentials=ssl_channel_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) # Run the base constructor. @@ -226,6 +234,7 @@ def __init__( ) self._stubs = {} + self._operations_client = None @property def grpc_channel(self) -> aio.Channel: @@ -245,13 +254,13 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: client. """ # Sanity check: Only create a new client if we do not already have one. - if "operations_client" not in self.__dict__: - self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( self.grpc_channel ) # Return the client from cache. - return self.__dict__["operations_client"] + return self._operations_client @property def create_workflow_template( diff --git a/google/cloud/dataproc_v1beta2/types/__init__.py b/google/cloud/dataproc_v1beta2/types/__init__.py index 9aa20055..23523cce 100644 --- a/google/cloud/dataproc_v1beta2/types/__init__.py +++ b/google/cloud/dataproc_v1beta2/types/__init__.py @@ -110,7 +110,6 @@ DeleteWorkflowTemplateRequest, ) - __all__ = ( "AutoscalingPolicy", "BasicAutoscalingAlgorithm", @@ -122,6 +121,7 @@ "DeleteAutoscalingPolicyRequest", "ListAutoscalingPoliciesRequest", "ListAutoscalingPoliciesResponse", + "Component", "Cluster", "ClusterConfig", "GkeClusterConfig", diff --git a/google/cloud/dataproc_v1beta2/types/clusters.py b/google/cloud/dataproc_v1beta2/types/clusters.py index a81c517f..97903e6f 100644 --- a/google/cloud/dataproc_v1beta2/types/clusters.py +++ b/google/cloud/dataproc_v1beta2/types/clusters.py @@ -135,14 +135,14 @@ class ClusterConfig(proto.Message): Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not - specify a temp bucket, - Dataproc will determine a Cloud Storage location - (US, ASIA, or EU) for your cluster's temp bucket - according to the Compute Engine zone where your - cluster is deployed, and then create and manage - this project-level, per-location bucket. The - default bucket has a TTL of 90 days, but you can - use any TTL (or none) if you specify a bucket. + specify a temp bucket, Dataproc will determine a + Cloud Storage location (US, ASIA, or EU) for + your cluster's temp bucket according to the + Compute Engine zone where your cluster is + deployed, and then create and manage this + project-level, per-location bucket. 
The default + bucket has a TTL of 90 days, but you can use any + TTL (or none) if you specify a bucket. gce_cluster_config (~.gcd_clusters.GceClusterConfig): Optional. The shared Compute Engine config settings for all instances in a cluster. diff --git a/synth.metadata b/synth.metadata index c119770d..209ee4ba 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,15 +4,15 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-dataproc.git", - "sha": "05c933c1a3f3df0d35705e99604da20b353d72e7" + "sha": "ee093a88841c7f9c9ea41b066993e56b4abe267d" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "dabe30b45bc86fdaf4126f26cbcfd73babbfa3fe", - "internalRef": "345194337" + "sha": "69697504d9eba1d064820c3085b4750767be6d08", + "internalRef": "348952930" } }, { diff --git a/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py b/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py index dab06a53..5fabfeb8 100644 --- a/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py +++ b/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py @@ -2042,6 +2042,10 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_client_cert_sour scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred @@ -2083,6 +2087,10 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_adc(transport_cl scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel diff --git a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py index c9d47b54..6d36f46b 100644 --- a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py +++ b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py @@ -1836,6 +1836,10 @@ def test_cluster_controller_transport_channel_mtls_with_client_cert_source( scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred @@ -1877,6 +1881,10 @@ def test_cluster_controller_transport_channel_mtls_with_adc(transport_class): scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel diff --git a/tests/unit/gapic/dataproc_v1/test_job_controller.py b/tests/unit/gapic/dataproc_v1/test_job_controller.py index b19332a4..5529d6fa 100644 --- a/tests/unit/gapic/dataproc_v1/test_job_controller.py +++ b/tests/unit/gapic/dataproc_v1/test_job_controller.py @@ -1904,6 +1904,10 @@ def test_job_controller_transport_channel_mtls_with_client_cert_source(transport scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + 
("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred @@ -1945,6 +1949,10 @@ def test_job_controller_transport_channel_mtls_with_adc(transport_class): scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel diff --git a/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py index be036496..2f171a18 100644 --- a/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py +++ b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py @@ -2501,6 +2501,10 @@ def test_workflow_template_service_transport_channel_mtls_with_client_cert_sourc scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred @@ -2542,6 +2546,10 @@ def test_workflow_template_service_transport_channel_mtls_with_adc(transport_cla scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel diff --git a/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py b/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py index 239f2993..1f24d213 100644 --- a/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py +++ b/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py @@ -2042,6 +2042,10 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_client_cert_sour scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred @@ -2083,6 +2087,10 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_adc(transport_cl scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel diff --git a/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py b/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py index 3e6bab80..863598c8 100644 --- a/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py +++ b/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py @@ -1838,6 +1838,10 @@ def test_cluster_controller_transport_channel_mtls_with_client_cert_source( scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred @@ -1879,6 
+1883,10 @@ def test_cluster_controller_transport_channel_mtls_with_adc(transport_class): scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel @@ -1909,8 +1917,33 @@ def test_cluster_controller_grpc_lro_async_client(): assert transport.operations_client is transport.operations_client +def test_cluster_path(): + project = "squid" + location = "clam" + cluster = "whelk" + + expected = "projects/{project}/locations/{location}/clusters/{cluster}".format( + project=project, location=location, cluster=cluster, + ) + actual = ClusterControllerClient.cluster_path(project, location, cluster) + assert expected == actual + + +def test_parse_cluster_path(): + expected = { + "project": "octopus", + "location": "oyster", + "cluster": "nudibranch", + } + path = ClusterControllerClient.cluster_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterControllerClient.parse_cluster_path(path) + assert expected == actual + + def test_common_billing_account_path(): - billing_account = "squid" + billing_account = "cuttlefish" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, @@ -1921,7 +1954,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "clam", + "billing_account": "mussel", } path = ClusterControllerClient.common_billing_account_path(**expected) @@ -1931,7 +1964,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "whelk" + folder = "winkle" expected = "folders/{folder}".format(folder=folder,) actual = ClusterControllerClient.common_folder_path(folder) @@ -1940,7 +1973,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "octopus", + "folder": "nautilus", } path = ClusterControllerClient.common_folder_path(**expected) @@ -1950,7 +1983,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "oyster" + organization = "scallop" expected = "organizations/{organization}".format(organization=organization,) actual = ClusterControllerClient.common_organization_path(organization) @@ -1959,7 +1992,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "nudibranch", + "organization": "abalone", } path = ClusterControllerClient.common_organization_path(**expected) @@ -1969,7 +2002,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "cuttlefish" + project = "squid" expected = "projects/{project}".format(project=project,) actual = ClusterControllerClient.common_project_path(project) @@ -1978,7 +2011,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "mussel", + "project": "clam", } path = ClusterControllerClient.common_project_path(**expected) @@ -1988,8 +2021,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "winkle" - location = "nautilus" + project = "whelk" + location = "octopus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -2000,8 +2033,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "scallop", - "location": "abalone", + "project": "oyster", + 
"location": "nudibranch", } path = ClusterControllerClient.common_location_path(**expected) diff --git a/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py b/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py index 9c4ebd86..d9b0e661 100644 --- a/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py +++ b/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py @@ -1930,6 +1930,10 @@ def test_job_controller_transport_channel_mtls_with_client_cert_source(transport scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred @@ -1971,6 +1975,10 @@ def test_job_controller_transport_channel_mtls_with_adc(transport_class): scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel diff --git a/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py b/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py index ef40c09d..00e5a115 100644 --- a/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py +++ b/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py @@ -2501,6 +2501,10 @@ def test_workflow_template_service_transport_channel_mtls_with_client_cert_sourc scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred @@ -2542,6 +2546,10 @@ def test_workflow_template_service_transport_channel_mtls_with_adc(transport_cla scopes=("https://www.googleapis.com/auth/cloud-platform",), ssl_credentials=mock_ssl_cred, quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], ) assert transport.grpc_channel == mock_grpc_channel @@ -2572,10 +2580,35 @@ def test_workflow_template_service_grpc_lro_async_client(): assert transport.operations_client is transport.operations_client -def test_workflow_template_path(): +def test_cluster_path(): project = "squid" - region = "clam" - workflow_template = "whelk" + location = "clam" + cluster = "whelk" + + expected = "projects/{project}/locations/{location}/clusters/{cluster}".format( + project=project, location=location, cluster=cluster, + ) + actual = WorkflowTemplateServiceClient.cluster_path(project, location, cluster) + assert expected == actual + + +def test_parse_cluster_path(): + expected = { + "project": "octopus", + "location": "oyster", + "cluster": "nudibranch", + } + path = WorkflowTemplateServiceClient.cluster_path(**expected) + + # Check that the path construction is reversible. 
+ actual = WorkflowTemplateServiceClient.parse_cluster_path(path) + assert expected == actual + + +def test_workflow_template_path(): + project = "cuttlefish" + region = "mussel" + workflow_template = "winkle" expected = "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format( project=project, region=region, workflow_template=workflow_template, @@ -2588,9 +2621,9 @@ def test_workflow_template_path(): def test_parse_workflow_template_path(): expected = { - "project": "octopus", - "region": "oyster", - "workflow_template": "nudibranch", + "project": "nautilus", + "region": "scallop", + "workflow_template": "abalone", } path = WorkflowTemplateServiceClient.workflow_template_path(**expected) @@ -2600,7 +2633,7 @@ def test_parse_workflow_template_path(): def test_common_billing_account_path(): - billing_account = "cuttlefish" + billing_account = "squid" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, @@ -2611,7 +2644,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "mussel", + "billing_account": "clam", } path = WorkflowTemplateServiceClient.common_billing_account_path(**expected) @@ -2621,7 +2654,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "winkle" + folder = "whelk" expected = "folders/{folder}".format(folder=folder,) actual = WorkflowTemplateServiceClient.common_folder_path(folder) @@ -2630,7 +2663,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nautilus", + "folder": "octopus", } path = WorkflowTemplateServiceClient.common_folder_path(**expected) @@ -2640,7 +2673,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "scallop" + organization = "oyster" expected = "organizations/{organization}".format(organization=organization,) actual = WorkflowTemplateServiceClient.common_organization_path(organization) @@ -2649,7 +2682,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "abalone", + "organization": "nudibranch", } path = WorkflowTemplateServiceClient.common_organization_path(**expected) @@ -2659,7 +2692,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "squid" + project = "cuttlefish" expected = "projects/{project}".format(project=project,) actual = WorkflowTemplateServiceClient.common_project_path(project) @@ -2668,7 +2701,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "clam", + "project": "mussel", } path = WorkflowTemplateServiceClient.common_project_path(**expected) @@ -2678,8 +2711,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "whelk" - location = "octopus" + project = "winkle" + location = "nautilus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -2690,8 +2723,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "oyster", - "location": "nudibranch", + "project": "scallop", + "location": "abalone", } path = WorkflowTemplateServiceClient.common_location_path(**expected)
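For reference, the mTLS tests updated above follow one pattern throughout: patch the channel factory, construct the transport, and assert that the factory received the new `options` kwarg. A minimal sketch of that assertion style with `unittest.mock` (the function and host names are hypothetical stand-ins, not the real test helpers):

from unittest import mock

# A Mock standing in for the patched grpc_helpers.create_channel.
factory = mock.Mock(return_value="mock-channel")


def build_transport(create_channel):
    # The real transports pass many more kwargs; this keeps only the
    # ones the new assertions care about.
    return create_channel(
        "dataproc.googleapis.com",
        quota_project_id=None,
        options=[
            ("grpc.max_send_message_length", -1),
            ("grpc.max_receive_message_length", -1),
        ],
    )


build_transport(factory)
factory.assert_called_once_with(
    "dataproc.googleapis.com",
    quota_project_id=None,
    options=[
        ("grpc.max_send_message_length", -1),
        ("grpc.max_receive_message_length", -1),
    ],
)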