From 822315ec3f2517ebb6ca199b72156ebd50e0518b Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 16 Jul 2020 13:42:51 -0700 Subject: [PATCH] fix: correct protobuf type for diagnose_cluster, update retry configs (#55) --- .../autoscaling_policy_service_client.py | 84 ++--- ...utoscaling_policy_service_client_config.py | 137 +++++-- .../gapic/cluster_controller_client.py | 178 ++++----- .../gapic/cluster_controller_client_config.py | 75 ++-- .../gapic/job_controller_client.py | 198 +++++----- .../gapic/job_controller_client_config.py | 150 ++++++-- ...toscaling_policy_service_grpc_transport.py | 22 +- .../cluster_controller_grpc_transport.py | 32 +- .../job_controller_grpc_transport.py | 26 +- ...orkflow_template_service_grpc_transport.py | 58 +-- .../gapic/workflow_template_service_client.py | 354 +++++++++--------- ...workflow_template_service_client_config.py | 87 +++-- .../proto/autoscaling_policies_pb2.py | 2 +- google/cloud/dataproc_v1/proto/clusters.proto | 4 +- .../cloud/dataproc_v1/proto/clusters_pb2.py | 18 +- google/cloud/dataproc_v1/proto/jobs_pb2.py | 2 +- .../cloud/dataproc_v1/proto/operations_pb2.py | 2 +- google/cloud/dataproc_v1/proto/shared_pb2.py | 2 +- .../proto/workflow_templates_pb2.py | 2 +- ...utoscaling_policy_service_client_config.py | 133 +++++-- .../gapic/cluster_controller_client.py | 178 ++++----- .../gapic/cluster_controller_client_config.py | 77 ++-- .../gapic/job_controller_client.py | 198 +++++----- .../gapic/job_controller_client_config.py | 150 ++++++-- .../cluster_controller_grpc_transport.py | 32 +- .../job_controller_grpc_transport.py | 26 +- ...orkflow_template_service_grpc_transport.py | 58 +-- .../gapic/workflow_template_service_client.py | 354 +++++++++--------- ...workflow_template_service_client_config.py | 85 +++-- .../proto/autoscaling_policies_pb2.py | 2 +- .../dataproc_v1beta2/proto/clusters_pb2.py | 2 +- .../cloud/dataproc_v1beta2/proto/jobs_pb2.py | 2 +- .../dataproc_v1beta2/proto/operations_pb2.py | 2 +- .../dataproc_v1beta2/proto/shared_pb2.py | 2 +- .../proto/workflow_templates_pb2.py | 2 +- synth.metadata | 10 +- ...st_autoscaling_policy_service_client_v1.py | 28 +- .../v1/test_cluster_controller_client_v1.py | 113 +++--- .../gapic/v1/test_job_controller_client_v1.py | 130 +++---- ...est_workflow_template_service_client_v1.py | 168 ++++----- .../test_cluster_controller_client_v1beta2.py | 112 +++--- .../test_job_controller_client_v1beta2.py | 134 +++---- ...orkflow_template_service_client_v1beta2.py | 168 ++++----- 43 files changed, 2027 insertions(+), 1572 deletions(-) diff --git a/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client.py b/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client.py index 3a732b60..c1627925 100644 --- a/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client.py +++ b/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client.py @@ -218,31 +218,41 @@ def __init__( self._inner_api_calls = {} # Service calls - def update_autoscaling_policy( + def create_autoscaling_policy( self, + parent, policy, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Updates (replaces) autoscaling policy. - - Disabled check for update_mask, because all updates will be full - replacements. + Creates new autoscaling policy. 
Example: >>> from google.cloud import dataproc_v1 >>> >>> client = dataproc_v1.AutoscalingPolicyServiceClient() >>> + >>> parent = client.region_path('[PROJECT]', '[REGION]') + >>> >>> # TODO: Initialize `policy`: >>> policy = {} >>> - >>> response = client.update_autoscaling_policy(policy) + >>> response = client.create_autoscaling_policy(parent, policy) Args: - policy (Union[dict, ~google.cloud.dataproc_v1.types.AutoscalingPolicy]): Required. The updated autoscaling policy. + parent (str): Required. The "resource name" of the region or location, as + described in https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.create``, the resource + name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.create``, the resource + name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + policy (Union[dict, ~google.cloud.dataproc_v1.types.AutoscalingPolicy]): Required. The autoscaling policy to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1.types.AutoscalingPolicy` @@ -266,24 +276,24 @@ def update_autoscaling_policy( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "update_autoscaling_policy" not in self._inner_api_calls: + if "create_autoscaling_policy" not in self._inner_api_calls: self._inner_api_calls[ - "update_autoscaling_policy" + "create_autoscaling_policy" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_autoscaling_policy, - default_retry=self._method_configs["UpdateAutoscalingPolicy"].retry, - default_timeout=self._method_configs["UpdateAutoscalingPolicy"].timeout, + self.transport.create_autoscaling_policy, + default_retry=self._method_configs["CreateAutoscalingPolicy"].retry, + default_timeout=self._method_configs["CreateAutoscalingPolicy"].timeout, client_info=self._client_info, ) - request = autoscaling_policies_pb2.UpdateAutoscalingPolicyRequest( - policy=policy, + request = autoscaling_policies_pb2.CreateAutoscalingPolicyRequest( + parent=parent, policy=policy, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("policy.name", policy.name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -292,45 +302,35 @@ def update_autoscaling_policy( ) metadata.append(routing_metadata) - return self._inner_api_calls["update_autoscaling_policy"]( + return self._inner_api_calls["create_autoscaling_policy"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def create_autoscaling_policy( + def update_autoscaling_policy( self, - parent, policy, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Creates new autoscaling policy. + Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. Example: >>> from google.cloud import dataproc_v1 >>> >>> client = dataproc_v1.AutoscalingPolicyServiceClient() >>> - >>> parent = client.region_path('[PROJECT]', '[REGION]') - >>> >>> # TODO: Initialize `policy`: >>> policy = {} >>> - >>> response = client.create_autoscaling_policy(parent, policy) + >>> response = client.update_autoscaling_policy(policy) Args: - parent (str): Required. 
The "resource name" of the region or location, as - described in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.create``, the resource - name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.autoscalingPolicies.create``, the resource - name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - policy (Union[dict, ~google.cloud.dataproc_v1.types.AutoscalingPolicy]): Required. The autoscaling policy to create. + policy (Union[dict, ~google.cloud.dataproc_v1.types.AutoscalingPolicy]): Required. The updated autoscaling policy. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1.types.AutoscalingPolicy` @@ -354,24 +354,24 @@ def create_autoscaling_policy( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "create_autoscaling_policy" not in self._inner_api_calls: + if "update_autoscaling_policy" not in self._inner_api_calls: self._inner_api_calls[ - "create_autoscaling_policy" + "update_autoscaling_policy" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_autoscaling_policy, - default_retry=self._method_configs["CreateAutoscalingPolicy"].retry, - default_timeout=self._method_configs["CreateAutoscalingPolicy"].timeout, + self.transport.update_autoscaling_policy, + default_retry=self._method_configs["UpdateAutoscalingPolicy"].retry, + default_timeout=self._method_configs["UpdateAutoscalingPolicy"].timeout, client_info=self._client_info, ) - request = autoscaling_policies_pb2.CreateAutoscalingPolicyRequest( - parent=parent, policy=policy, + request = autoscaling_policies_pb2.UpdateAutoscalingPolicyRequest( + policy=policy, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("policy.name", policy.name)] except AttributeError: pass else: @@ -380,7 +380,7 @@ def create_autoscaling_policy( ) metadata.append(routing_metadata) - return self._inner_api_calls["create_autoscaling_policy"]( + return self._inner_api_calls["update_autoscaling_policy"]( request, retry=retry, timeout=timeout, metadata=metadata ) diff --git a/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client_config.py b/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client_config.py index dc98b7bd..7066450a 100644 --- a/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client_config.py +++ b/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client_config.py @@ -2,45 +2,136 @@ "interfaces": { "google.cloud.dataproc.v1.AutoscalingPolicyService": { "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], + "retry_policy_4_codes": [ + "DEADLINE_EXCEEDED", + "INTERNAL", + "UNAVAILABLE", + ], + "retry_policy_1_codes": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], + "retry_policy_6_codes": [ + "INTERNAL", + "DEADLINE_EXCEEDED", + "UNAVAILABLE", + ], + "no_retry_codes": [], + "retry_policy_3_codes": ["UNAVAILABLE"], + "retry_policy_2_codes": [ + "DEADLINE_EXCEEDED", + "INTERNAL", + "UNAVAILABLE", + ], + "no_retry_1_codes": [], + "retry_policy_5_codes": ["UNAVAILABLE"], + "retry_policy_7_codes": ["UNAVAILABLE"], }, "retry_params": { - "default": { + "retry_policy_1_params": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, + 
"initial_rpc_timeout_millis": 600000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, + "max_rpc_timeout_millis": 600000, "total_timeout_millis": 600000, - } + }, + "retry_policy_3_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, + }, + "retry_policy_2_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 900000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 900000, + "total_timeout_millis": 900000, + }, + "retry_policy_6_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + "retry_policy_7_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 900000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 900000, + "total_timeout_millis": 900000, + }, + "retry_policy_5_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + "retry_policy_4_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, + }, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + "no_retry_1_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, + }, }, "methods": { - "UpdateAutoscalingPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, "CreateAutoscalingPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", + }, + "UpdateAutoscalingPolicy": { + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "GetAutoscalingPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ListAutoscalingPolicies": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "DeleteAutoscalingPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - 
"retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, }, } diff --git a/google/cloud/dataproc_v1/gapic/cluster_controller_client.py b/google/cloud/dataproc_v1/gapic/cluster_controller_client.py index fc1608ac..9b01fa67 100644 --- a/google/cloud/dataproc_v1/gapic/cluster_controller_client.py +++ b/google/cloud/dataproc_v1/gapic/cluster_controller_client.py @@ -600,6 +600,95 @@ def delete_cluster( metadata_type=proto_operations_pb2.ClusterOperationMetadata, ) + def diagnose_cluster( + self, + project_id, + region, + cluster_name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Gets cluster diagnostic information. The returned + ``Operation.metadata`` will be + `ClusterOperationMetadata `__. + After the operation completes, ``Operation.response`` contains + `DiagnoseClusterResults `__. + + Example: + >>> from google.cloud import dataproc_v1 + >>> + >>> client = dataproc_v1.ClusterControllerClient() + >>> + >>> # TODO: Initialize `project_id`: + >>> project_id = '' + >>> + >>> # TODO: Initialize `region`: + >>> region = '' + >>> + >>> # TODO: Initialize `cluster_name`: + >>> cluster_name = '' + >>> + >>> response = client.diagnose_cluster(project_id, region, cluster_name) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + project_id (str): Required. The ID of the Google Cloud Platform project that the cluster + belongs to. + region (str): Required. The Dataproc region in which to handle the request. + cluster_name (str): Required. The cluster name. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.dataproc_v1.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "diagnose_cluster" not in self._inner_api_calls: + self._inner_api_calls[ + "diagnose_cluster" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.diagnose_cluster, + default_retry=self._method_configs["DiagnoseCluster"].retry, + default_timeout=self._method_configs["DiagnoseCluster"].timeout, + client_info=self._client_info, + ) + + request = clusters_pb2.DiagnoseClusterRequest( + project_id=project_id, region=region, cluster_name=cluster_name, + ) + operation = self._inner_api_calls["diagnose_cluster"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + clusters_pb2.DiagnoseClusterResults, + metadata_type=proto_operations_pb2.ClusterOperationMetadata, + ) + def get_cluster( self, project_id, @@ -787,92 +876,3 @@ def list_clusters( response_token_field="next_page_token", ) return iterator - - def diagnose_cluster( - self, - project_id, - region, - cluster_name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets cluster diagnostic information. The returned - ``Operation.metadata`` will be - `ClusterOperationMetadata `__. - After the operation completes, ``Operation.response`` contains - `DiagnoseClusterResults `__. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `cluster_name`: - >>> cluster_name = '' - >>> - >>> response = client.diagnose_cluster(project_id, region, cluster_name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the cluster - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - cluster_name (str): Required. The cluster name. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "diagnose_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "diagnose_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.diagnose_cluster, - default_retry=self._method_configs["DiagnoseCluster"].retry, - default_timeout=self._method_configs["DiagnoseCluster"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.DiagnoseClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name, - ) - operation = self._inner_api_calls["diagnose_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=clusters_pb2.DiagnoseClusterResults, - ) diff --git a/google/cloud/dataproc_v1/gapic/cluster_controller_client_config.py b/google/cloud/dataproc_v1/gapic/cluster_controller_client_config.py index a1081c8d..51479bb1 100644 --- a/google/cloud/dataproc_v1/gapic/cluster_controller_client_config.py +++ b/google/cloud/dataproc_v1/gapic/cluster_controller_client_config.py @@ -2,50 +2,73 @@ "interfaces": { "google.cloud.dataproc.v1.ClusterController": { "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"], - "non_idempotent": ["UNAVAILABLE"], + "retry_policy_6_codes": [ + "INTERNAL", + "DEADLINE_EXCEEDED", + "UNAVAILABLE", + ], + "no_retry_codes": [], + "retry_policy_5_codes": ["UNAVAILABLE"], }, "retry_params": { - "default": { + "retry_policy_6_params": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 30000, + "initial_rpc_timeout_millis": 300000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 30000, + "max_rpc_timeout_millis": 300000, "total_timeout_millis": 300000, - } + }, + "retry_policy_5_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, }, "methods": { "CreateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 300000, + "retry_codes_name": "retry_policy_5_codes", + "retry_params_name": "retry_policy_5_params", }, "UpdateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 300000, + "retry_codes_name": "retry_policy_5_codes", + "retry_params_name": "retry_policy_5_params", }, "DeleteCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 300000, + "retry_codes_name": "retry_policy_5_codes", + "retry_params_name": "retry_policy_5_params", + }, + "DiagnoseCluster": { + "timeout_millis": 300000, + "retry_codes_name": "retry_policy_5_codes", + "retry_params_name": "retry_policy_5_params", }, "GetCluster": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 300000, + "retry_codes_name": "retry_policy_6_codes", + "retry_params_name": "retry_policy_6_params", }, "ListClusters": { - "timeout_millis": 
60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "DiagnoseCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 300000, + "retry_codes_name": "retry_policy_6_codes", + "retry_params_name": "retry_policy_6_params", }, }, } diff --git a/google/cloud/dataproc_v1/gapic/job_controller_client.py b/google/cloud/dataproc_v1/gapic/job_controller_client.py index a2b17aae..25f12dfa 100644 --- a/google/cloud/dataproc_v1/gapic/job_controller_client.py +++ b/google/cloud/dataproc_v1/gapic/job_controller_client.py @@ -279,6 +279,105 @@ def submit_job( request, retry=retry, timeout=timeout, metadata=metadata ) + def submit_job_as_operation( + self, + project_id, + region, + job, + request_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Submits job to a cluster. + + Example: + >>> from google.cloud import dataproc_v1 + >>> + >>> client = dataproc_v1.JobControllerClient() + >>> + >>> # TODO: Initialize `project_id`: + >>> project_id = '' + >>> + >>> # TODO: Initialize `region`: + >>> region = '' + >>> + >>> # TODO: Initialize `job`: + >>> job = {} + >>> + >>> response = client.submit_job_as_operation(project_id, region, job) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + project_id (str): Required. The ID of the Google Cloud Platform project that the job + belongs to. + region (str): Required. The Dataproc region in which to handle the request. + job (Union[dict, ~google.cloud.dataproc_v1.types.Job]): Required. The job resource. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.dataproc_v1.types.Job` + request_id (str): Optional. A unique id used to identify the request. If the server + receives two ``SubmitJobRequest`` requests with the same id, then the + second request will be ignored and the first ``Job`` created and stored + in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), underscores + (_), and hyphens (-). The maximum length is 40 characters. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.dataproc_v1.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
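+        # Unlike submit_job, this method returns an operation future: the
+        # future resolves to the completed jobs_pb2.Job, and jobs_pb2.JobMetadata
+        # is available while the job runs (see the from_gapic call below).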
+ if "submit_job_as_operation" not in self._inner_api_calls: + self._inner_api_calls[ + "submit_job_as_operation" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.submit_job_as_operation, + default_retry=self._method_configs["SubmitJobAsOperation"].retry, + default_timeout=self._method_configs["SubmitJobAsOperation"].timeout, + client_info=self._client_info, + ) + + request = jobs_pb2.SubmitJobRequest( + project_id=project_id, region=region, job=job, request_id=request_id, + ) + operation = self._inner_api_calls["submit_job_as_operation"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + jobs_pb2.Job, + metadata_type=jobs_pb2.JobMetadata, + ) + def get_job( self, project_id, @@ -709,102 +808,3 @@ def delete_job( self._inner_api_calls["delete_job"]( request, retry=retry, timeout=timeout, metadata=metadata ) - - def submit_job_as_operation( - self, - project_id, - region, - job, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Submits job to a cluster. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job`: - >>> job = {} - >>> - >>> response = client.submit_job_as_operation(project_id, region, job) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - job (Union[dict, ~google.cloud.dataproc_v1.types.Job]): Required. The job resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.Job` - request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``SubmitJobRequest`` requests with the same id, then the - second request will be ignored and the first ``Job`` created and stored - in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "submit_job_as_operation" not in self._inner_api_calls: - self._inner_api_calls[ - "submit_job_as_operation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.submit_job_as_operation, - default_retry=self._method_configs["SubmitJobAsOperation"].retry, - default_timeout=self._method_configs["SubmitJobAsOperation"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.SubmitJobRequest( - project_id=project_id, region=region, job=job, request_id=request_id, - ) - operation = self._inner_api_calls["submit_job_as_operation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - jobs_pb2.Job, - metadata_type=jobs_pb2.JobMetadata, - ) diff --git a/google/cloud/dataproc_v1/gapic/job_controller_client_config.py b/google/cloud/dataproc_v1/gapic/job_controller_client_config.py index 69ef2d50..c04bef57 100644 --- a/google/cloud/dataproc_v1/gapic/job_controller_client_config.py +++ b/google/cloud/dataproc_v1/gapic/job_controller_client_config.py @@ -2,56 +2,146 @@ "interfaces": { "google.cloud.dataproc.v1.JobController": { "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"], - "non_idempotent2": [], - "non_idempotent": ["UNAVAILABLE"], + "retry_policy_4_codes": [ + "DEADLINE_EXCEEDED", + "INTERNAL", + "UNAVAILABLE", + ], + "retry_policy_1_codes": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], + "retry_policy_6_codes": [ + "INTERNAL", + "DEADLINE_EXCEEDED", + "UNAVAILABLE", + ], + "no_retry_codes": [], + "retry_policy_3_codes": ["UNAVAILABLE"], + "retry_policy_2_codes": [ + "DEADLINE_EXCEEDED", + "INTERNAL", + "UNAVAILABLE", + ], + "no_retry_1_codes": [], + "retry_policy_5_codes": ["UNAVAILABLE"], + "retry_policy_7_codes": ["UNAVAILABLE"], }, "retry_params": { - "default": { + "retry_policy_1_params": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 30000, + "initial_rpc_timeout_millis": 600000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 30000, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, + }, + "retry_policy_3_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, + }, + "retry_policy_2_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 900000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 900000, + "total_timeout_millis": 900000, + }, + "retry_policy_6_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + "retry_policy_7_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 900000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 900000, "total_timeout_millis": 900000, - } + }, + "retry_policy_5_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + 
"max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + "retry_policy_4_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, + }, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + "no_retry_1_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, + }, }, "methods": { "SubmitJob": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_7_codes", + "retry_params_name": "retry_policy_7_params", + }, + "SubmitJobAsOperation": { + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_7_codes", + "retry_params_name": "retry_policy_7_params", }, "GetJob": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_2_codes", + "retry_params_name": "retry_policy_2_params", }, "ListJobs": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_2_codes", + "retry_params_name": "retry_policy_2_params", }, "UpdateJob": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_7_codes", + "retry_params_name": "retry_policy_7_params", }, "CancelJob": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_2_codes", + "retry_params_name": "retry_policy_2_params", }, "DeleteJob": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SubmitJobAsOperation": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent2", - "retry_params_name": "default", + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_7_codes", + "retry_params_name": "retry_policy_7_params", }, }, } diff --git a/google/cloud/dataproc_v1/gapic/transports/autoscaling_policy_service_grpc_transport.py b/google/cloud/dataproc_v1/gapic/transports/autoscaling_policy_service_grpc_transport.py index 7f9c704d..b2b4e646 100644 --- a/google/cloud/dataproc_v1/gapic/transports/autoscaling_policy_service_grpc_transport.py +++ b/google/cloud/dataproc_v1/gapic/transports/autoscaling_policy_service_grpc_transport.py @@ -110,33 +110,33 @@ def channel(self): return self._channel @property - def update_autoscaling_policy(self): - """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.update_autoscaling_policy`. - - Updates (replaces) autoscaling policy. + def create_autoscaling_policy(self): + """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.create_autoscaling_policy`. - Disabled check for update_mask, because all updates will be full - replacements. + Creates new autoscaling policy. 
Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["autoscaling_policy_service_stub"].UpdateAutoscalingPolicy + return self._stubs["autoscaling_policy_service_stub"].CreateAutoscalingPolicy @property - def create_autoscaling_policy(self): - """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.create_autoscaling_policy`. + def update_autoscaling_policy(self): + """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.update_autoscaling_policy`. - Creates new autoscaling policy. + Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["autoscaling_policy_service_stub"].CreateAutoscalingPolicy + return self._stubs["autoscaling_policy_service_stub"].UpdateAutoscalingPolicy @property def get_autoscaling_policy(self): diff --git a/google/cloud/dataproc_v1/gapic/transports/cluster_controller_grpc_transport.py b/google/cloud/dataproc_v1/gapic/transports/cluster_controller_grpc_transport.py index 3c8c780f..5fc36d86 100644 --- a/google/cloud/dataproc_v1/gapic/transports/cluster_controller_grpc_transport.py +++ b/google/cloud/dataproc_v1/gapic/transports/cluster_controller_grpc_transport.py @@ -161,44 +161,44 @@ def delete_cluster(self): return self._stubs["cluster_controller_stub"].DeleteCluster @property - def get_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.get_cluster`. + def diagnose_cluster(self): + """Return the gRPC stub for :meth:`ClusterControllerClient.diagnose_cluster`. - Gets the resource representation for a cluster in a project. + Gets cluster diagnostic information. The returned + ``Operation.metadata`` will be + `ClusterOperationMetadata `__. + After the operation completes, ``Operation.response`` contains + `DiagnoseClusterResults `__. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["cluster_controller_stub"].GetCluster + return self._stubs["cluster_controller_stub"].DiagnoseCluster @property - def list_clusters(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.list_clusters`. + def get_cluster(self): + """Return the gRPC stub for :meth:`ClusterControllerClient.get_cluster`. - Lists all regions/{region}/clusters in a project alphabetically. + Gets the resource representation for a cluster in a project. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["cluster_controller_stub"].ListClusters + return self._stubs["cluster_controller_stub"].GetCluster @property - def diagnose_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.diagnose_cluster`. + def list_clusters(self): + """Return the gRPC stub for :meth:`ClusterControllerClient.list_clusters`. - Gets cluster diagnostic information. The returned - ``Operation.metadata`` will be - `ClusterOperationMetadata `__. - After the operation completes, ``Operation.response`` contains - `DiagnoseClusterResults `__. + Lists all regions/{region}/clusters in a project alphabetically. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. 
""" - return self._stubs["cluster_controller_stub"].DiagnoseCluster + return self._stubs["cluster_controller_stub"].ListClusters diff --git a/google/cloud/dataproc_v1/gapic/transports/job_controller_grpc_transport.py b/google/cloud/dataproc_v1/gapic/transports/job_controller_grpc_transport.py index 84b454eb..54a30763 100644 --- a/google/cloud/dataproc_v1/gapic/transports/job_controller_grpc_transport.py +++ b/google/cloud/dataproc_v1/gapic/transports/job_controller_grpc_transport.py @@ -128,6 +128,19 @@ def submit_job(self): """ return self._stubs["job_controller_stub"].SubmitJob + @property + def submit_job_as_operation(self): + """Return the gRPC stub for :meth:`JobControllerClient.submit_job_as_operation`. + + Submits job to a cluster. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["job_controller_stub"].SubmitJobAsOperation + @property def get_job(self): """Return the gRPC stub for :meth:`JobControllerClient.get_job`. @@ -197,16 +210,3 @@ def delete_job(self): deserialized response object. """ return self._stubs["job_controller_stub"].DeleteJob - - @property - def submit_job_as_operation(self): - """Return the gRPC stub for :meth:`JobControllerClient.submit_job_as_operation`. - - Submits job to a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].SubmitJobAsOperation diff --git a/google/cloud/dataproc_v1/gapic/transports/workflow_template_service_grpc_transport.py b/google/cloud/dataproc_v1/gapic/transports/workflow_template_service_grpc_transport.py index 7388eca6..705ab40b 100644 --- a/google/cloud/dataproc_v1/gapic/transports/workflow_template_service_grpc_transport.py +++ b/google/cloud/dataproc_v1/gapic/transports/workflow_template_service_grpc_transport.py @@ -117,35 +117,6 @@ def channel(self): """ return self._channel - @property - def create_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.create_workflow_template`. - - Creates new workflow template. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].CreateWorkflowTemplate - - @property - def get_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.get_workflow_template`. - - Retrieves the latest workflow template. - - Can retrieve previously instantiated template by specifying optional - version parameter. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].GetWorkflowTemplate - @property def instantiate_workflow_template(self): """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.instantiate_workflow_template`. @@ -208,6 +179,35 @@ def instantiate_inline_workflow_template(self): "workflow_template_service_stub" ].InstantiateInlineWorkflowTemplate + @property + def create_workflow_template(self): + """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.create_workflow_template`. + + Creates new workflow template. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. 
+ """ + return self._stubs["workflow_template_service_stub"].CreateWorkflowTemplate + + @property + def get_workflow_template(self): + """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.get_workflow_template`. + + Retrieves the latest workflow template. + + Can retrieve previously instantiated template by specifying optional + version parameter. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["workflow_template_service_stub"].GetWorkflowTemplate + @property def update_workflow_template(self): """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.update_workflow_template`. diff --git a/google/cloud/dataproc_v1/gapic/workflow_template_service_client.py b/google/cloud/dataproc_v1/gapic/workflow_template_service_client.py index 200ceee9..417a1dd0 100644 --- a/google/cloud/dataproc_v1/gapic/workflow_template_service_client.py +++ b/google/cloud/dataproc_v1/gapic/workflow_template_service_client.py @@ -230,183 +230,6 @@ def __init__( self._inner_api_calls = {} # Service calls - def create_workflow_template( - self, - parent, - template, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates new workflow template. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.WorkflowTemplateServiceClient() - >>> - >>> parent = client.region_path('[PROJECT]', '[REGION]') - >>> - >>> # TODO: Initialize `template`: - >>> template = {} - >>> - >>> response = client.create_workflow_template(parent, template) - - Args: - parent (str): Required. The resource name of the region or location, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,create``, the resource name - of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.create``, the resource - name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - template (Union[dict, ~google.cloud.dataproc_v1.types.WorkflowTemplate]): Required. The Dataproc workflow template to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.WorkflowTemplate` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types.WorkflowTemplate` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "create_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_workflow_template, - default_retry=self._method_configs["CreateWorkflowTemplate"].retry, - default_timeout=self._method_configs["CreateWorkflowTemplate"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.CreateWorkflowTemplateRequest( - parent=parent, template=template, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_workflow_template( - self, - name, - version=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Retrieves the latest workflow template. - - Can retrieve previously instantiated template by specifying optional - version parameter. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.WorkflowTemplateServiceClient() - >>> - >>> # TODO: Initialize `name`: - >>> name = '' - >>> - >>> response = client.get_workflow_template(name) - - Args: - name (str): Required. The resource name of the workflow template, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.get``, the resource name of - the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.get``, the resource name - of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): Optional. The version of workflow template to retrieve. Only previously - instantiated versions can be retrieved. - - If unspecified, retrieves the current version. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types.WorkflowTemplate` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "get_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_workflow_template, - default_retry=self._method_configs["GetWorkflowTemplate"].retry, - default_timeout=self._method_configs["GetWorkflowTemplate"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.GetWorkflowTemplateRequest( - name=name, version=version, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - def instantiate_workflow_template( self, name, @@ -676,6 +499,183 @@ def instantiate_inline_workflow_template( metadata_type=workflow_templates_pb2.WorkflowMetadata, ) + def create_workflow_template( + self, + parent, + template, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Creates new workflow template. + + Example: + >>> from google.cloud import dataproc_v1 + >>> + >>> client = dataproc_v1.WorkflowTemplateServiceClient() + >>> + >>> parent = client.region_path('[PROJECT]', '[REGION]') + >>> + >>> # TODO: Initialize `template`: + >>> template = {} + >>> + >>> response = client.create_workflow_template(parent, template) + + Args: + parent (str): Required. The resource name of the region or location, as described + in https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,create``, the resource name + of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, the resource + name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + template (Union[dict, ~google.cloud.dataproc_v1.types.WorkflowTemplate]): Required. The Dataproc workflow template to create. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.dataproc_v1.types.WorkflowTemplate` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.dataproc_v1.types.WorkflowTemplate` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "create_workflow_template" not in self._inner_api_calls: + self._inner_api_calls[ + "create_workflow_template" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_workflow_template, + default_retry=self._method_configs["CreateWorkflowTemplate"].retry, + default_timeout=self._method_configs["CreateWorkflowTemplate"].timeout, + client_info=self._client_info, + ) + + request = workflow_templates_pb2.CreateWorkflowTemplateRequest( + parent=parent, template=template, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["create_workflow_template"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def get_workflow_template( + self, + name, + version=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Retrieves the latest workflow template. + + Can retrieve previously instantiated template by specifying optional + version parameter. + + Example: + >>> from google.cloud import dataproc_v1 + >>> + >>> client = dataproc_v1.WorkflowTemplateServiceClient() + >>> + >>> # TODO: Initialize `name`: + >>> name = '' + >>> + >>> response = client.get_workflow_template(name) + + Args: + name (str): Required. The resource name of the workflow template, as described + in https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the resource name of + the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the resource name + of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): Optional. The version of workflow template to retrieve. Only previously + instantiated versions can be retrieved. + + If unspecified, retrieves the current version. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.dataproc_v1.types.WorkflowTemplate` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "get_workflow_template" not in self._inner_api_calls: + self._inner_api_calls[ + "get_workflow_template" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_workflow_template, + default_retry=self._method_configs["GetWorkflowTemplate"].retry, + default_timeout=self._method_configs["GetWorkflowTemplate"].timeout, + client_info=self._client_info, + ) + + request = workflow_templates_pb2.GetWorkflowTemplateRequest( + name=name, version=version, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_workflow_template"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + def update_workflow_template( self, template, diff --git a/google/cloud/dataproc_v1/gapic/workflow_template_service_client_config.py b/google/cloud/dataproc_v1/gapic/workflow_template_service_client_config.py index 8b6be43f..ec0aea38 100644 --- a/google/cloud/dataproc_v1/gapic/workflow_template_service_client_config.py +++ b/google/cloud/dataproc_v1/gapic/workflow_template_service_client_config.py @@ -2,55 +2,78 @@ "interfaces": { "google.cloud.dataproc.v1.WorkflowTemplateService": { "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"], - "non_idempotent": ["UNAVAILABLE"], + "retry_policy_4_codes": [ + "DEADLINE_EXCEEDED", + "INTERNAL", + "UNAVAILABLE", + ], + "no_retry_codes": [], + "retry_policy_3_codes": ["UNAVAILABLE"], }, "retry_params": { - "default": { + "retry_policy_3_params": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 30000, + "initial_rpc_timeout_millis": 600000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 30000, - "total_timeout_millis": 900000, - } - }, - "methods": { - "CreateWorkflowTemplate": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, }, - "GetWorkflowTemplate": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_policy_4_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, }, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + }, + "methods": { "InstantiateWorkflowTemplate": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_3_codes", + "retry_params_name": "retry_policy_3_params", }, "InstantiateInlineWorkflowTemplate": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_3_codes", + "retry_params_name": "retry_policy_3_params", + }, + "CreateWorkflowTemplate": { + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_3_codes", + "retry_params_name": 
"retry_policy_3_params", + }, + "GetWorkflowTemplate": { + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_4_codes", + "retry_params_name": "retry_policy_4_params", }, "UpdateWorkflowTemplate": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_3_codes", + "retry_params_name": "retry_policy_3_params", }, "ListWorkflowTemplates": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_4_codes", + "retry_params_name": "retry_policy_4_params", }, "DeleteWorkflowTemplate": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_3_codes", + "retry_params_name": "retry_policy_3_params", }, }, } diff --git a/google/cloud/dataproc_v1/proto/autoscaling_policies_pb2.py b/google/cloud/dataproc_v1/proto/autoscaling_policies_pb2.py index 32570daf..ae720579 100644 --- a/google/cloud/dataproc_v1/proto/autoscaling_policies_pb2.py +++ b/google/cloud/dataproc_v1/proto/autoscaling_policies_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/dataproc_v1/proto/autoscaling_policies.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/google/cloud/dataproc_v1/proto/clusters.proto b/google/cloud/dataproc_v1/proto/clusters.proto index fbaf9391..c66d35d3 100644 --- a/google/cloud/dataproc_v1/proto/clusters.proto +++ b/google/cloud/dataproc_v1/proto/clusters.proto @@ -111,8 +111,8 @@ service ClusterController { }; option (google.api.method_signature) = "project_id,region,cluster_name"; option (google.longrunning.operation_info) = { - response_type: "google.protobuf.Empty" - metadata_type: "DiagnoseClusterResults" + response_type: "DiagnoseClusterResults" + metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" }; } } diff --git a/google/cloud/dataproc_v1/proto/clusters_pb2.py b/google/cloud/dataproc_v1/proto/clusters_pb2.py index aa7312de..0b950767 100644 --- a/google/cloud/dataproc_v1/proto/clusters_pb2.py +++ b/google/cloud/dataproc_v1/proto/clusters_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/dataproc_v1/proto/clusters.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -32,7 +32,7 @@ syntax="proto3", serialized_options=b"\n\034com.google.cloud.dataproc.v1B\rClustersProtoP\001Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc", create_key=_descriptor._internal_create_key, - serialized_pb=b'\n-google/cloud/dataproc_v1/proto/clusters.proto\x12\x18google.cloud.dataproc.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a+google/cloud/dataproc_v1/proto/shared.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xcd\x03\n\x07\x43luster\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12<\n\x06\x63onfig\x18\x03 \x01(\x0b\x32\'.google.cloud.dataproc.v1.ClusterConfigB\x03\xe0\x41\x02\x12\x42\n\x06labels\x18\x08 \x03(\x0b\x32-.google.cloud.dataproc.v1.Cluster.LabelsEntryB\x03\xe0\x41\x01\x12<\n\x06status\x18\x04 \x01(\x0b\x32\'.google.cloud.dataproc.v1.ClusterStatusB\x03\xe0\x41\x03\x12\x44\n\x0estatus_history\x18\x07 \x03(\x0b\x32\'.google.cloud.dataproc.v1.ClusterStatusB\x03\xe0\x41\x03\x12\x19\n\x0c\x63luster_uuid\x18\x06 \x01(\tB\x03\xe0\x41\x03\x12>\n\x07metrics\x18\t \x01(\x0b\x32(.google.cloud.dataproc.v1.ClusterMetricsB\x03\xe0\x41\x03\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xb0\x06\n\rClusterConfig\x12\x1a\n\rconfig_bucket\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12K\n\x12gce_cluster_config\x18\x08 \x01(\x0b\x32*.google.cloud.dataproc.v1.GceClusterConfigB\x03\xe0\x41\x01\x12I\n\rmaster_config\x18\t \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12I\n\rworker_config\x18\n \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12S\n\x17secondary_worker_config\x18\x0c \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12\x46\n\x0fsoftware_config\x18\r \x01(\x0b\x32(.google.cloud.dataproc.v1.SoftwareConfigB\x03\xe0\x41\x01\x12W\n\x16initialization_actions\x18\x0b \x03(\x0b\x32\x32.google.cloud.dataproc.v1.NodeInitializationActionB\x03\xe0\x41\x01\x12J\n\x11\x65ncryption_config\x18\x0f \x01(\x0b\x32*.google.cloud.dataproc.v1.EncryptionConfigB\x03\xe0\x41\x01\x12L\n\x12\x61utoscaling_config\x18\x12 \x01(\x0b\x32+.google.cloud.dataproc.v1.AutoscalingConfigB\x03\xe0\x41\x01\x12\x46\n\x0fsecurity_config\x18\x10 \x01(\x0b\x32(.google.cloud.dataproc.v1.SecurityConfigB\x03\xe0\x41\x01\x12H\n\x10lifecycle_config\x18\x11 \x01(\x0b\x32).google.cloud.dataproc.v1.LifecycleConfigB\x03\xe0\x41\x01",\n\x11\x41utoscalingConfig\x12\x17\n\npolicy_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01"4\n\x10\x45ncryptionConfig\x12 \n\x13gce_pd_kms_key_name\x18\x01 \x01(\tB\x03\xe0\x41\x01"\x9f\x03\n\x10GceClusterConfig\x12\x15\n\x08zone_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0bnetwork_uri\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0esubnetwork_uri\x18\x06 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10internal_ip_only\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12\x1c\n\x0fservice_account\x18\x08 \x01(\tB\x03\xe0\x41\x01\x12#\n\x16service_account_scopes\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x0c\n\x04tags\x18\x04 \x03(\t\x12J\n\x08metadata\x18\x05 
\x03(\x0b\x32\x38.google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry\x12P\n\x14reservation_affinity\x18\x0b \x01(\x0b\x32-.google.cloud.dataproc.v1.ReservationAffinityB\x03\xe0\x41\x01\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x9a\x03\n\x13InstanceGroupConfig\x12\x1a\n\rnum_instances\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1b\n\x0einstance_names\x18\x02 \x03(\tB\x03\xe0\x41\x03\x12\x16\n\timage_uri\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10machine_type_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12>\n\x0b\x64isk_config\x18\x05 \x01(\x0b\x32$.google.cloud.dataproc.v1.DiskConfigB\x03\xe0\x41\x01\x12\x1b\n\x0eis_preemptible\x18\x06 \x01(\x08\x42\x03\xe0\x41\x03\x12O\n\x14managed_group_config\x18\x07 \x01(\x0b\x32,.google.cloud.dataproc.v1.ManagedGroupConfigB\x03\xe0\x41\x03\x12\x46\n\x0c\x61\x63\x63\x65lerators\x18\x08 \x03(\x0b\x32+.google.cloud.dataproc.v1.AcceleratorConfigB\x03\xe0\x41\x01\x12\x1d\n\x10min_cpu_platform\x18\t \x01(\tB\x03\xe0\x41\x01"c\n\x12ManagedGroupConfig\x12#\n\x16instance_template_name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12(\n\x1binstance_group_manager_name\x18\x02 \x01(\tB\x03\xe0\x41\x03"L\n\x11\x41\x63\x63\x65leratorConfig\x12\x1c\n\x14\x61\x63\x63\x65lerator_type_uri\x18\x01 \x01(\t\x12\x19\n\x11\x61\x63\x63\x65lerator_count\x18\x02 \x01(\x05"f\n\nDiskConfig\x12\x1b\n\x0e\x62oot_disk_type\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1e\n\x11\x62oot_disk_size_gb\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1b\n\x0enum_local_ssds\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01"s\n\x18NodeInitializationAction\x12\x1c\n\x0f\x65xecutable_file\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x11\x65xecution_timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01"\x84\x03\n\rClusterStatus\x12\x41\n\x05state\x18\x01 \x01(\x0e\x32-.google.cloud.dataproc.v1.ClusterStatus.StateB\x03\xe0\x41\x03\x12\x16\n\x06\x64\x65tail\x18\x02 \x01(\tB\x06\xe0\x41\x03\xe0\x41\x01\x12\x39\n\x10state_start_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12G\n\x08substate\x18\x04 \x01(\x0e\x32\x30.google.cloud.dataproc.v1.ClusterStatus.SubstateB\x03\xe0\x41\x03"V\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\t\n\x05\x45RROR\x10\x03\x12\x0c\n\x08\x44\x45LETING\x10\x04\x12\x0c\n\x08UPDATING\x10\x05"<\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tUNHEALTHY\x10\x01\x12\x10\n\x0cSTALE_STATUS\x10\x02"S\n\x0eSecurityConfig\x12\x41\n\x0fkerberos_config\x18\x01 \x01(\x0b\x32(.google.cloud.dataproc.v1.KerberosConfig"\x90\x04\n\x0eKerberosConfig\x12\x1c\n\x0f\x65nable_kerberos\x18\x01 \x01(\x08\x42\x03\xe0\x41\x01\x12(\n\x1broot_principal_password_uri\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x18\n\x0bkms_key_uri\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0ckeystore_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0etruststore_uri\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12"\n\x15keystore_password_uri\x18\x06 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10key_password_uri\x18\x07 \x01(\tB\x03\xe0\x41\x01\x12$\n\x17truststore_password_uri\x18\x08 \x01(\tB\x03\xe0\x41\x01\x12$\n\x17\x63ross_realm_trust_realm\x18\t \x01(\tB\x03\xe0\x41\x01\x12"\n\x15\x63ross_realm_trust_kdc\x18\n \x01(\tB\x03\xe0\x41\x01\x12+\n\x1e\x63ross_realm_trust_admin_server\x18\x0b \x01(\tB\x03\xe0\x41\x01\x12\x32\n%cross_realm_trust_shared_password_uri\x18\x0c \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0ekdc_db_key_uri\x18\r \x01(\tB\x03\xe0\x41\x01\x12\x1f\n\x12tgt_lifetime_hours\x18\x0e 
\x01(\x05\x42\x03\xe0\x41\x01\x12\x12\n\x05realm\x18\x0f \x01(\tB\x03\xe0\x41\x01"\xf9\x01\n\x0eSoftwareConfig\x12\x1a\n\rimage_version\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12Q\n\nproperties\x18\x02 \x03(\x0b\x32\x38.google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntryB\x03\xe0\x41\x01\x12\x45\n\x13optional_components\x18\x03 \x03(\x0e\x32#.google.cloud.dataproc.v1.ComponentB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x83\x02\n\x0fLifecycleConfig\x12\x37\n\x0fidle_delete_ttl\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12;\n\x10\x61uto_delete_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x01H\x00\x12\x39\n\x0f\x61uto_delete_ttl\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01H\x00\x12\x38\n\x0fidle_start_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x42\x05\n\x03ttl"\x9a\x02\n\x0e\x43lusterMetrics\x12O\n\x0chdfs_metrics\x18\x01 \x03(\x0b\x32\x39.google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry\x12O\n\x0cyarn_metrics\x18\x02 \x03(\x0b\x32\x39.google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry\x1a\x32\n\x10HdfsMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1a\x32\n\x10YarnMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01"\x96\x01\n\x14\x43reateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x02 \x01(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x04 \x01(\tB\x03\xe0\x41\x01"\xae\x02\n\x14UpdateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x05 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x02\x12\x45\n\x1dgraceful_decommission_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12\x34\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x07 \x01(\tB\x03\xe0\x41\x01"\x93\x01\n\x14\x44\x65leteClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_uuid\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x17\n\nrequest_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\\\n\x11GetClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"\x89\x01\n\x13ListClustersRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x04 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06\x66ilter\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01"n\n\x14ListClustersResponse\x12\x38\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x03"a\n\x16\x44iagnoseClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 
\x01(\tB\x03\xe0\x41\x02"1\n\x16\x44iagnoseClusterResults\x12\x17\n\noutput_uri\x18\x01 \x01(\tB\x03\xe0\x41\x03"\xf8\x01\n\x13ReservationAffinity\x12Y\n\x18\x63onsume_reservation_type\x18\x01 \x01(\x0e\x32\x32.google.cloud.dataproc.v1.ReservationAffinity.TypeB\x03\xe0\x41\x01\x12\x10\n\x03key\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x13\n\x06values\x18\x03 \x03(\tB\x03\xe0\x41\x01"_\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x12\n\x0eNO_RESERVATION\x10\x01\x12\x13\n\x0f\x41NY_RESERVATION\x10\x02\x12\x18\n\x14SPECIFIC_RESERVATION\x10\x03\x32\xe3\x0c\n\x11\x43lusterController\x12\x80\x02\n\rCreateCluster\x12..google.cloud.dataproc.v1.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"\x9f\x01\x82\xd3\xe4\x93\x02>"3/v1/projects/{project_id}/regions/{region}/clusters:\x07\x63luster\xda\x41\x19project_id,region,cluster\xca\x41<\n\x07\x43luster\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\xa8\x02\n\rUpdateCluster\x12..google.cloud.dataproc.v1.UpdateClusterRequest\x1a\x1d.google.longrunning.Operation"\xc7\x01\x82\xd3\xe4\x93\x02M2B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\x07\x63luster\xda\x41\x32project_id,region,cluster_name,cluster,update_mask\xca\x41<\n\x07\x43luster\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\x99\x02\n\rDeleteCluster\x12..google.cloud.dataproc.v1.DeleteClusterRequest\x1a\x1d.google.longrunning.Operation"\xb8\x01\x82\xd3\xe4\x93\x02\x44*B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41\x1eproject_id,region,cluster_name\xca\x41J\n\x15google.protobuf.Empty\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\xc9\x01\n\nGetCluster\x12+.google.cloud.dataproc.v1.GetClusterRequest\x1a!.google.cloud.dataproc.v1.Cluster"k\x82\xd3\xe4\x93\x02\x44\x12\x42/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41\x1eproject_id,region,cluster_name\x12\xd9\x01\n\x0cListClusters\x12-.google.cloud.dataproc.v1.ListClustersRequest\x1a..google.cloud.dataproc.v1.ListClustersResponse"j\x82\xd3\xe4\x93\x02\x35\x12\x33/v1/projects/{project_id}/regions/{region}/clusters\xda\x41\x11project_id,region\xda\x41\x18project_id,region,filter\x12\x8e\x02\n\x0f\x44iagnoseCluster\x12\x30.google.cloud.dataproc.v1.DiagnoseClusterRequest\x1a\x1d.google.longrunning.Operation"\xa9\x01\x82\xd3\xe4\x93\x02P"K/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\x01*\xda\x41\x1eproject_id,region,cluster_name\xca\x41/\n\x15google.protobuf.Empty\x12\x16\x44iagnoseClusterResults\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBq\n\x1c\x63om.google.cloud.dataproc.v1B\rClustersProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataprocb\x06proto3', + serialized_pb=b'\n-google/cloud/dataproc_v1/proto/clusters.proto\x12\x18google.cloud.dataproc.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a+google/cloud/dataproc_v1/proto/shared.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xcd\x03\n\x07\x43luster\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12<\n\x06\x63onfig\x18\x03 \x01(\x0b\x32\'.google.cloud.dataproc.v1.ClusterConfigB\x03\xe0\x41\x02\x12\x42\n\x06labels\x18\x08 \x03(\x0b\x32-.google.cloud.dataproc.v1.Cluster.LabelsEntryB\x03\xe0\x41\x01\x12<\n\x06status\x18\x04 
\x01(\x0b\x32\'.google.cloud.dataproc.v1.ClusterStatusB\x03\xe0\x41\x03\x12\x44\n\x0estatus_history\x18\x07 \x03(\x0b\x32\'.google.cloud.dataproc.v1.ClusterStatusB\x03\xe0\x41\x03\x12\x19\n\x0c\x63luster_uuid\x18\x06 \x01(\tB\x03\xe0\x41\x03\x12>\n\x07metrics\x18\t \x01(\x0b\x32(.google.cloud.dataproc.v1.ClusterMetricsB\x03\xe0\x41\x03\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xb0\x06\n\rClusterConfig\x12\x1a\n\rconfig_bucket\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12K\n\x12gce_cluster_config\x18\x08 \x01(\x0b\x32*.google.cloud.dataproc.v1.GceClusterConfigB\x03\xe0\x41\x01\x12I\n\rmaster_config\x18\t \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12I\n\rworker_config\x18\n \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12S\n\x17secondary_worker_config\x18\x0c \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12\x46\n\x0fsoftware_config\x18\r \x01(\x0b\x32(.google.cloud.dataproc.v1.SoftwareConfigB\x03\xe0\x41\x01\x12W\n\x16initialization_actions\x18\x0b \x03(\x0b\x32\x32.google.cloud.dataproc.v1.NodeInitializationActionB\x03\xe0\x41\x01\x12J\n\x11\x65ncryption_config\x18\x0f \x01(\x0b\x32*.google.cloud.dataproc.v1.EncryptionConfigB\x03\xe0\x41\x01\x12L\n\x12\x61utoscaling_config\x18\x12 \x01(\x0b\x32+.google.cloud.dataproc.v1.AutoscalingConfigB\x03\xe0\x41\x01\x12\x46\n\x0fsecurity_config\x18\x10 \x01(\x0b\x32(.google.cloud.dataproc.v1.SecurityConfigB\x03\xe0\x41\x01\x12H\n\x10lifecycle_config\x18\x11 \x01(\x0b\x32).google.cloud.dataproc.v1.LifecycleConfigB\x03\xe0\x41\x01",\n\x11\x41utoscalingConfig\x12\x17\n\npolicy_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01"4\n\x10\x45ncryptionConfig\x12 \n\x13gce_pd_kms_key_name\x18\x01 \x01(\tB\x03\xe0\x41\x01"\x9f\x03\n\x10GceClusterConfig\x12\x15\n\x08zone_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0bnetwork_uri\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0esubnetwork_uri\x18\x06 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10internal_ip_only\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12\x1c\n\x0fservice_account\x18\x08 \x01(\tB\x03\xe0\x41\x01\x12#\n\x16service_account_scopes\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x0c\n\x04tags\x18\x04 \x03(\t\x12J\n\x08metadata\x18\x05 \x03(\x0b\x32\x38.google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry\x12P\n\x14reservation_affinity\x18\x0b \x01(\x0b\x32-.google.cloud.dataproc.v1.ReservationAffinityB\x03\xe0\x41\x01\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x9a\x03\n\x13InstanceGroupConfig\x12\x1a\n\rnum_instances\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1b\n\x0einstance_names\x18\x02 \x03(\tB\x03\xe0\x41\x03\x12\x16\n\timage_uri\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10machine_type_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12>\n\x0b\x64isk_config\x18\x05 \x01(\x0b\x32$.google.cloud.dataproc.v1.DiskConfigB\x03\xe0\x41\x01\x12\x1b\n\x0eis_preemptible\x18\x06 \x01(\x08\x42\x03\xe0\x41\x03\x12O\n\x14managed_group_config\x18\x07 \x01(\x0b\x32,.google.cloud.dataproc.v1.ManagedGroupConfigB\x03\xe0\x41\x03\x12\x46\n\x0c\x61\x63\x63\x65lerators\x18\x08 \x03(\x0b\x32+.google.cloud.dataproc.v1.AcceleratorConfigB\x03\xe0\x41\x01\x12\x1d\n\x10min_cpu_platform\x18\t \x01(\tB\x03\xe0\x41\x01"c\n\x12ManagedGroupConfig\x12#\n\x16instance_template_name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12(\n\x1binstance_group_manager_name\x18\x02 
\x01(\tB\x03\xe0\x41\x03"L\n\x11\x41\x63\x63\x65leratorConfig\x12\x1c\n\x14\x61\x63\x63\x65lerator_type_uri\x18\x01 \x01(\t\x12\x19\n\x11\x61\x63\x63\x65lerator_count\x18\x02 \x01(\x05"f\n\nDiskConfig\x12\x1b\n\x0e\x62oot_disk_type\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1e\n\x11\x62oot_disk_size_gb\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1b\n\x0enum_local_ssds\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01"s\n\x18NodeInitializationAction\x12\x1c\n\x0f\x65xecutable_file\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x11\x65xecution_timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01"\x84\x03\n\rClusterStatus\x12\x41\n\x05state\x18\x01 \x01(\x0e\x32-.google.cloud.dataproc.v1.ClusterStatus.StateB\x03\xe0\x41\x03\x12\x16\n\x06\x64\x65tail\x18\x02 \x01(\tB\x06\xe0\x41\x03\xe0\x41\x01\x12\x39\n\x10state_start_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12G\n\x08substate\x18\x04 \x01(\x0e\x32\x30.google.cloud.dataproc.v1.ClusterStatus.SubstateB\x03\xe0\x41\x03"V\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\t\n\x05\x45RROR\x10\x03\x12\x0c\n\x08\x44\x45LETING\x10\x04\x12\x0c\n\x08UPDATING\x10\x05"<\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tUNHEALTHY\x10\x01\x12\x10\n\x0cSTALE_STATUS\x10\x02"S\n\x0eSecurityConfig\x12\x41\n\x0fkerberos_config\x18\x01 \x01(\x0b\x32(.google.cloud.dataproc.v1.KerberosConfig"\x90\x04\n\x0eKerberosConfig\x12\x1c\n\x0f\x65nable_kerberos\x18\x01 \x01(\x08\x42\x03\xe0\x41\x01\x12(\n\x1broot_principal_password_uri\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x18\n\x0bkms_key_uri\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0ckeystore_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0etruststore_uri\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12"\n\x15keystore_password_uri\x18\x06 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10key_password_uri\x18\x07 \x01(\tB\x03\xe0\x41\x01\x12$\n\x17truststore_password_uri\x18\x08 \x01(\tB\x03\xe0\x41\x01\x12$\n\x17\x63ross_realm_trust_realm\x18\t \x01(\tB\x03\xe0\x41\x01\x12"\n\x15\x63ross_realm_trust_kdc\x18\n \x01(\tB\x03\xe0\x41\x01\x12+\n\x1e\x63ross_realm_trust_admin_server\x18\x0b \x01(\tB\x03\xe0\x41\x01\x12\x32\n%cross_realm_trust_shared_password_uri\x18\x0c \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0ekdc_db_key_uri\x18\r \x01(\tB\x03\xe0\x41\x01\x12\x1f\n\x12tgt_lifetime_hours\x18\x0e \x01(\x05\x42\x03\xe0\x41\x01\x12\x12\n\x05realm\x18\x0f \x01(\tB\x03\xe0\x41\x01"\xf9\x01\n\x0eSoftwareConfig\x12\x1a\n\rimage_version\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12Q\n\nproperties\x18\x02 \x03(\x0b\x32\x38.google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntryB\x03\xe0\x41\x01\x12\x45\n\x13optional_components\x18\x03 \x03(\x0e\x32#.google.cloud.dataproc.v1.ComponentB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x83\x02\n\x0fLifecycleConfig\x12\x37\n\x0fidle_delete_ttl\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12;\n\x10\x61uto_delete_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x01H\x00\x12\x39\n\x0f\x61uto_delete_ttl\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01H\x00\x12\x38\n\x0fidle_start_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x42\x05\n\x03ttl"\x9a\x02\n\x0e\x43lusterMetrics\x12O\n\x0chdfs_metrics\x18\x01 \x03(\x0b\x32\x39.google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry\x12O\n\x0cyarn_metrics\x18\x02 
\x03(\x0b\x32\x39.google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry\x1a\x32\n\x10HdfsMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1a\x32\n\x10YarnMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01"\x96\x01\n\x14\x43reateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x02 \x01(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x04 \x01(\tB\x03\xe0\x41\x01"\xae\x02\n\x14UpdateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x05 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x02\x12\x45\n\x1dgraceful_decommission_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12\x34\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x07 \x01(\tB\x03\xe0\x41\x01"\x93\x01\n\x14\x44\x65leteClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_uuid\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x17\n\nrequest_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\\\n\x11GetClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"\x89\x01\n\x13ListClustersRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x04 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06\x66ilter\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01"n\n\x14ListClustersResponse\x12\x38\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x03"a\n\x16\x44iagnoseClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"1\n\x16\x44iagnoseClusterResults\x12\x17\n\noutput_uri\x18\x01 \x01(\tB\x03\xe0\x41\x03"\xf8\x01\n\x13ReservationAffinity\x12Y\n\x18\x63onsume_reservation_type\x18\x01 \x01(\x0e\x32\x32.google.cloud.dataproc.v1.ReservationAffinity.TypeB\x03\xe0\x41\x01\x12\x10\n\x03key\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x13\n\x06values\x18\x03 
\x03(\tB\x03\xe0\x41\x01"_\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x12\n\x0eNO_RESERVATION\x10\x01\x12\x13\n\x0f\x41NY_RESERVATION\x10\x02\x12\x18\n\x14SPECIFIC_RESERVATION\x10\x03\x32\xff\x0c\n\x11\x43lusterController\x12\x80\x02\n\rCreateCluster\x12..google.cloud.dataproc.v1.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"\x9f\x01\x82\xd3\xe4\x93\x02>"3/v1/projects/{project_id}/regions/{region}/clusters:\x07\x63luster\xda\x41\x19project_id,region,cluster\xca\x41<\n\x07\x43luster\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\xa8\x02\n\rUpdateCluster\x12..google.cloud.dataproc.v1.UpdateClusterRequest\x1a\x1d.google.longrunning.Operation"\xc7\x01\x82\xd3\xe4\x93\x02M2B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\x07\x63luster\xda\x41\x32project_id,region,cluster_name,cluster,update_mask\xca\x41<\n\x07\x43luster\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\x99\x02\n\rDeleteCluster\x12..google.cloud.dataproc.v1.DeleteClusterRequest\x1a\x1d.google.longrunning.Operation"\xb8\x01\x82\xd3\xe4\x93\x02\x44*B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41\x1eproject_id,region,cluster_name\xca\x41J\n\x15google.protobuf.Empty\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\xc9\x01\n\nGetCluster\x12+.google.cloud.dataproc.v1.GetClusterRequest\x1a!.google.cloud.dataproc.v1.Cluster"k\x82\xd3\xe4\x93\x02\x44\x12\x42/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41\x1eproject_id,region,cluster_name\x12\xd9\x01\n\x0cListClusters\x12-.google.cloud.dataproc.v1.ListClustersRequest\x1a..google.cloud.dataproc.v1.ListClustersResponse"j\x82\xd3\xe4\x93\x02\x35\x12\x33/v1/projects/{project_id}/regions/{region}/clusters\xda\x41\x11project_id,region\xda\x41\x18project_id,region,filter\x12\xaa\x02\n\x0f\x44iagnoseCluster\x12\x30.google.cloud.dataproc.v1.DiagnoseClusterRequest\x1a\x1d.google.longrunning.Operation"\xc5\x01\x82\xd3\xe4\x93\x02P"K/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\x01*\xda\x41\x1eproject_id,region,cluster_name\xca\x41K\n\x16\x44iagnoseClusterResults\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBq\n\x1c\x63om.google.cloud.dataproc.v1B\rClustersProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataprocb\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -3355,11 +3355,11 @@ completed. By default, executables are run on master and all worker nodes. You can test a node’s ``role`` metadata to run an executable on a master or worker node, as shown below using - ``curl`` (you can also use ``wget``): :: ROLE=$(curl -H - Metadata-Flavor:Google http://metadata/computeMetadata/v1/i - nstance/attributes/dataproc-role) if [[ "${ROLE}" == - 'Master' ]]; then ... master specific actions ... else - ... worker specific actions ... fi + ``curl`` (you can also use ``wget``): ROLE=\ :math:`(curl -H + Metadata-Flavor:Google http://metadata/computeMetadata/v1/ins + tance/attributes/dataproc-role) if [[ "`\ {ROLE}" == ‘Master’ + ]]; then … master specific actions … else … worker specific + actions … fi encryption_config: Optional. Encryption settings for the cluster. 
autoscaling_config: @@ -4364,7 +4364,7 @@ serialized_options=b"\312A\027dataproc.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", create_key=_descriptor._internal_create_key, serialized_start=6120, - serialized_end=7755, + serialized_end=7783, methods=[ _descriptor.MethodDescriptor( name="CreateCluster", @@ -4423,7 +4423,7 @@ containing_service=None, input_type=_DIAGNOSECLUSTERREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002P"K/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\001*\332A\036project_id,region,cluster_name\312A/\n\025google.protobuf.Empty\022\026DiagnoseClusterResults', + serialized_options=b'\202\323\344\223\002P"K/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\001*\332A\036project_id,region,cluster_name\312AK\n\026DiagnoseClusterResults\0221google.cloud.dataproc.v1.ClusterOperationMetadata', create_key=_descriptor._internal_create_key, ), ], diff --git a/google/cloud/dataproc_v1/proto/jobs_pb2.py b/google/cloud/dataproc_v1/proto/jobs_pb2.py index 043c310e..6fd028e6 100644 --- a/google/cloud/dataproc_v1/proto/jobs_pb2.py +++ b/google/cloud/dataproc_v1/proto/jobs_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/dataproc_v1/proto/jobs.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/google/cloud/dataproc_v1/proto/operations_pb2.py b/google/cloud/dataproc_v1/proto/operations_pb2.py index bb28ccc3..f8ed3ca8 100644 --- a/google/cloud/dataproc_v1/proto/operations_pb2.py +++ b/google/cloud/dataproc_v1/proto/operations_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/dataproc_v1/proto/operations.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/google/cloud/dataproc_v1/proto/shared_pb2.py b/google/cloud/dataproc_v1/proto/shared_pb2.py index c37e8647..2b5e305d 100644 --- a/google/cloud/dataproc_v1/proto/shared_pb2.py +++ b/google/cloud/dataproc_v1/proto/shared_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/dataproc_v1/proto/shared.proto -"""Generated protocol buffer code.""" + from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message diff --git a/google/cloud/dataproc_v1/proto/workflow_templates_pb2.py b/google/cloud/dataproc_v1/proto/workflow_templates_pb2.py index 362ed4b6..492ba11a 100644 --- a/google/cloud/dataproc_v1/proto/workflow_templates_pb2.py +++ b/google/cloud/dataproc_v1/proto/workflow_templates_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
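The ``serialized_options`` change above is the generated mirror of the ``operation_info`` fix in ``clusters.proto``: a v1 ``DiagnoseCluster`` operation now resolves to ``DiagnoseClusterResults`` instead of ``google.protobuf.Empty``, with ``ClusterOperationMetadata`` as its metadata type. A sketch of unwrapping the long-running operation after this fix, assuming a v1 ``ClusterControllerClient`` named ``client`` (identifiers are placeholders):

    operation = client.diagnose_cluster(project_id, region, cluster_name)
    results = operation.result()   # DiagnoseClusterResults after this change
    print(results.output_uri)      # Cloud Storage URI of the diagnostic output
    print(operation.metadata)      # ClusterOperationMetadata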
# source: google/cloud/dataproc_v1/proto/workflow_templates.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client_config.py b/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client_config.py index 53ca474b..3274e972 100644 --- a/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client_config.py +++ b/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client_config.py @@ -2,45 +2,136 @@ "interfaces": { "google.cloud.dataproc.v1beta2.AutoscalingPolicyService": { "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], + "retry_policy_1_codes": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], + "retry_policy_4_codes": ["UNAVAILABLE"], + "retry_policy_6_codes": ["UNAVAILABLE"], + "no_retry_codes": [], + "retry_policy_3_codes": [ + "INTERNAL", + "DEADLINE_EXCEEDED", + "UNAVAILABLE", + ], + "retry_policy_2_codes": ["UNAVAILABLE"], + "no_retry_1_codes": [], + "retry_policy_5_codes": [ + "DEADLINE_EXCEEDED", + "INTERNAL", + "UNAVAILABLE", + ], + "retry_policy_7_codes": [ + "DEADLINE_EXCEEDED", + "INTERNAL", + "UNAVAILABLE", + ], }, "retry_params": { - "default": { + "retry_policy_1_params": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, + "initial_rpc_timeout_millis": 600000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, + "max_rpc_timeout_millis": 600000, "total_timeout_millis": 600000, - } + }, + "retry_policy_6_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, + }, + "retry_policy_2_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + "retry_policy_3_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + "retry_policy_7_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, + }, + "retry_policy_5_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 900000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 900000, + "total_timeout_millis": 900000, + }, + "retry_policy_4_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 900000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 900000, + "total_timeout_millis": 900000, + }, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + 
"initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + "no_retry_1_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, + }, }, "methods": { "CreateAutoscalingPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "UpdateAutoscalingPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "GetAutoscalingPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ListAutoscalingPolicies": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "DeleteAutoscalingPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, }, } diff --git a/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py b/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py index 02be88be..bdc99bf7 100644 --- a/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py +++ b/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py @@ -610,6 +610,95 @@ def delete_cluster( metadata_type=proto_operations_pb2.ClusterOperationMetadata, ) + def diagnose_cluster( + self, + project_id, + region, + cluster_name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Gets cluster diagnostic information. The returned + ``Operation.metadata`` will be + `ClusterOperationMetadata `__. + After the operation completes, ``Operation.response`` contains + ``Empty``. + + Example: + >>> from google.cloud import dataproc_v1beta2 + >>> + >>> client = dataproc_v1beta2.ClusterControllerClient() + >>> + >>> # TODO: Initialize `project_id`: + >>> project_id = '' + >>> + >>> # TODO: Initialize `region`: + >>> region = '' + >>> + >>> # TODO: Initialize `cluster_name`: + >>> cluster_name = '' + >>> + >>> response = client.diagnose_cluster(project_id, region, cluster_name) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + project_id (str): Required. The ID of the Google Cloud Platform project that the cluster + belongs to. + region (str): Required. The Dataproc region in which to handle the request. + cluster_name (str): Required. The cluster name. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. 
+ timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "diagnose_cluster" not in self._inner_api_calls: + self._inner_api_calls[ + "diagnose_cluster" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.diagnose_cluster, + default_retry=self._method_configs["DiagnoseCluster"].retry, + default_timeout=self._method_configs["DiagnoseCluster"].timeout, + client_info=self._client_info, + ) + + request = clusters_pb2.DiagnoseClusterRequest( + project_id=project_id, region=region, cluster_name=cluster_name, + ) + operation = self._inner_api_calls["diagnose_cluster"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.ClusterOperationMetadata, + ) + def get_cluster( self, project_id, @@ -797,92 +886,3 @@ def list_clusters( response_token_field="next_page_token", ) return iterator - - def diagnose_cluster( - self, - project_id, - region, - cluster_name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets cluster diagnostic information. The returned - ``Operation.metadata`` will be - `ClusterOperationMetadata `__. - After the operation completes, ``Operation.response`` contains - ``Empty``. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `cluster_name`: - >>> cluster_name = '' - >>> - >>> response = client.diagnose_cluster(project_id, region, cluster_name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the cluster - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - cluster_name (str): Required. The cluster name. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
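Unlike the v1 surface, the v1beta2 wrapper above still resolves the future to ``empty_pb2.Empty``, so progress and outcome details come from the operation metadata rather than the response. A sketch of polling it, assuming ``client`` is the v1beta2 ``ClusterControllerClient`` being patched here (identifiers are placeholders):

    operation = client.diagnose_cluster(project_id, region, cluster_name)
    operation.result()         # Empty in v1beta2
    meta = operation.metadata  # ClusterOperationMetadata
    print(meta.operation_type, meta.status.state)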
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "diagnose_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "diagnose_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.diagnose_cluster, - default_retry=self._method_configs["DiagnoseCluster"].retry, - default_timeout=self._method_configs["DiagnoseCluster"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.DiagnoseClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name, - ) - operation = self._inner_api_calls["diagnose_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.ClusterOperationMetadata, - ) diff --git a/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client_config.py b/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client_config.py index b2482807..43673451 100644 --- a/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client_config.py +++ b/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client_config.py @@ -2,50 +2,73 @@ "interfaces": { "google.cloud.dataproc.v1beta2.ClusterController": { "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"], - "non_idempotent": ["UNAVAILABLE"], + "no_retry_codes": [], + "retry_policy_3_codes": [ + "INTERNAL", + "DEADLINE_EXCEEDED", + "UNAVAILABLE", + ], + "retry_policy_2_codes": ["UNAVAILABLE"], }, "retry_params": { - "default": { + "retry_policy_2_params": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, + "initial_rpc_timeout_millis": 300000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + "retry_policy_3_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, }, "methods": { "CreateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 300000, + "retry_codes_name": "retry_policy_2_codes", + "retry_params_name": "retry_policy_2_params", }, "UpdateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 300000, + "retry_codes_name": "retry_policy_2_codes", + "retry_params_name": "retry_policy_2_params", }, "DeleteCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 300000, + "retry_codes_name": "retry_policy_2_codes", + "retry_params_name": "retry_policy_2_params", + }, + "DiagnoseCluster": { + "timeout_millis": 300000, + "retry_codes_name": "retry_policy_2_codes", + "retry_params_name": 
"retry_policy_2_params", }, "GetCluster": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 300000, + "retry_codes_name": "retry_policy_3_codes", + "retry_params_name": "retry_policy_3_params", }, "ListClusters": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "DiagnoseCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 300000, + "retry_codes_name": "retry_policy_3_codes", + "retry_params_name": "retry_policy_3_params", }, }, } diff --git a/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py b/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py index 99f95de7..f3f12304 100644 --- a/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py +++ b/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py @@ -279,6 +279,105 @@ def submit_job( request, retry=retry, timeout=timeout, metadata=metadata ) + def submit_job_as_operation( + self, + project_id, + region, + job, + request_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Submits job to a cluster. + + Example: + >>> from google.cloud import dataproc_v1beta2 + >>> + >>> client = dataproc_v1beta2.JobControllerClient() + >>> + >>> # TODO: Initialize `project_id`: + >>> project_id = '' + >>> + >>> # TODO: Initialize `region`: + >>> region = '' + >>> + >>> # TODO: Initialize `job`: + >>> job = {} + >>> + >>> response = client.submit_job_as_operation(project_id, region, job) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + project_id (str): Required. The ID of the Google Cloud Platform project that the job + belongs to. + region (str): Required. The Dataproc region in which to handle the request. + job (Union[dict, ~google.cloud.dataproc_v1beta2.types.Job]): Required. The job resource. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.dataproc_v1beta2.types.Job` + request_id (str): Optional. A unique id used to identify the request. If the server + receives two ``SubmitJobRequest`` requests with the same id, then the + second request will be ignored and the first ``Job`` created and stored + in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), underscores + (_), and hyphens (-). The maximum length is 40 characters. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. 
+ ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "submit_job_as_operation" not in self._inner_api_calls: + self._inner_api_calls[ + "submit_job_as_operation" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.submit_job_as_operation, + default_retry=self._method_configs["SubmitJobAsOperation"].retry, + default_timeout=self._method_configs["SubmitJobAsOperation"].timeout, + client_info=self._client_info, + ) + + request = jobs_pb2.SubmitJobRequest( + project_id=project_id, region=region, job=job, request_id=request_id, + ) + operation = self._inner_api_calls["submit_job_as_operation"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + jobs_pb2.Job, + metadata_type=jobs_pb2.JobMetadata, + ) + def get_job( self, project_id, @@ -709,102 +808,3 @@ def delete_job( self._inner_api_calls["delete_job"]( request, retry=retry, timeout=timeout, metadata=metadata ) - - def submit_job_as_operation( - self, - project_id, - region, - job, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Submits job to a cluster. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job`: - >>> job = {} - >>> - >>> response = client.submit_job_as_operation(project_id, region, job) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - job (Union[dict, ~google.cloud.dataproc_v1beta2.types.Job]): Required. The job resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.Job` - request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``SubmitJobRequest`` requests with the same id, then the - second request will be ignored and the first ``Job`` created and stored - in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
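The ``request_id`` contract documented above makes blind resubmission safe: a second ``SubmitJobRequest`` carrying the same id is ignored and the first stored ``Job`` is returned. A sketch of an idempotent submit with a generated id, assuming a v1beta2 ``JobControllerClient`` named ``client`` (the job fields are illustrative):

    import uuid

    request_id = uuid.uuid4().hex  # 32 hex chars: letters and digits, under the 40-char cap
    job = {
        "placement": {"cluster_name": "my-cluster"},
        "pyspark_job": {"main_python_file_uri": "gs://my-bucket/job.py"},
    }
    op = client.submit_job_as_operation(project_id, region, job, request_id=request_id)
    # Safe to repeat after a timeout: the server dedupes on request_id.
    op = client.submit_job_as_operation(project_id, region, job, request_id=request_id)
    job_result = op.result()  # jobs_pb2.Job, per the from_gapic call above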
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "submit_job_as_operation" not in self._inner_api_calls: - self._inner_api_calls[ - "submit_job_as_operation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.submit_job_as_operation, - default_retry=self._method_configs["SubmitJobAsOperation"].retry, - default_timeout=self._method_configs["SubmitJobAsOperation"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.SubmitJobRequest( - project_id=project_id, region=region, job=job, request_id=request_id, - ) - operation = self._inner_api_calls["submit_job_as_operation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - jobs_pb2.Job, - metadata_type=jobs_pb2.JobMetadata, - ) diff --git a/google/cloud/dataproc_v1beta2/gapic/job_controller_client_config.py b/google/cloud/dataproc_v1beta2/gapic/job_controller_client_config.py index d9e29c97..75561150 100644 --- a/google/cloud/dataproc_v1beta2/gapic/job_controller_client_config.py +++ b/google/cloud/dataproc_v1beta2/gapic/job_controller_client_config.py @@ -2,56 +2,146 @@ "interfaces": { "google.cloud.dataproc.v1beta2.JobController": { "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"], - "non_idempotent2": [], - "non_idempotent": ["UNAVAILABLE"], + "retry_policy_1_codes": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], + "retry_policy_4_codes": ["UNAVAILABLE"], + "retry_policy_6_codes": ["UNAVAILABLE"], + "no_retry_codes": [], + "retry_policy_3_codes": [ + "INTERNAL", + "DEADLINE_EXCEEDED", + "UNAVAILABLE", + ], + "retry_policy_2_codes": ["UNAVAILABLE"], + "no_retry_1_codes": [], + "retry_policy_5_codes": [ + "DEADLINE_EXCEEDED", + "INTERNAL", + "UNAVAILABLE", + ], + "retry_policy_7_codes": [ + "DEADLINE_EXCEEDED", + "INTERNAL", + "UNAVAILABLE", + ], }, "retry_params": { - "default": { + "retry_policy_1_params": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, + "initial_rpc_timeout_millis": 600000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, + "max_rpc_timeout_millis": 600000, "total_timeout_millis": 600000, - } + }, + "retry_policy_6_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, + }, + "retry_policy_2_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + "retry_policy_3_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + "retry_policy_7_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + 
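# How these parameter blocks behave at runtime: google.api_core turns each
# retry_policy_*_params entry into an exponential backoff, so the n-th retry
# waits roughly min(initial_retry_delay_millis * retry_delay_multiplier**n,
# max_retry_delay_millis) and the call is abandoned once total_timeout_millis
# has elapsed. A minimal hand-built equivalent of retry_policy_7 (a sketch,
# not part of the generated config):
#
#   from google.api_core import exceptions, retry
#
#   policy = retry.Retry(
#       predicate=retry.if_exception_type(
#           exceptions.DeadlineExceeded,     # retry_policy_7_codes
#           exceptions.InternalServerError,
#           exceptions.ServiceUnavailable,
#       ),
#       initial=0.1,     # initial_retry_delay_millis / 1000
#       multiplier=1.3,  # retry_delay_multiplier
#       maximum=60.0,    # max_retry_delay_millis / 1000
#       deadline=600.0,  # total_timeout_millis / 1000
#   )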
"total_timeout_millis": 600000, + }, + "retry_policy_5_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 900000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 900000, + "total_timeout_millis": 900000, + }, + "retry_policy_4_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 900000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 900000, + "total_timeout_millis": 900000, + }, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + "no_retry_1_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, + }, }, "methods": { "SubmitJob": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_4_codes", + "retry_params_name": "retry_policy_4_params", + }, + "SubmitJobAsOperation": { + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_4_codes", + "retry_params_name": "retry_policy_4_params", }, "GetJob": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_5_codes", + "retry_params_name": "retry_policy_5_params", }, "ListJobs": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_5_codes", + "retry_params_name": "retry_policy_5_params", }, "UpdateJob": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_4_codes", + "retry_params_name": "retry_policy_4_params", }, "CancelJob": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_5_codes", + "retry_params_name": "retry_policy_5_params", }, "DeleteJob": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SubmitJobAsOperation": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent2", - "retry_params_name": "default", + "timeout_millis": 900000, + "retry_codes_name": "retry_policy_4_codes", + "retry_params_name": "retry_policy_4_params", }, }, } diff --git a/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py b/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py index b75460e7..c8bbc15c 100644 --- a/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py +++ b/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py @@ -161,44 +161,44 @@ def delete_cluster(self): return self._stubs["cluster_controller_stub"].DeleteCluster @property - def get_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.get_cluster`. 
+ def diagnose_cluster(self): + """Return the gRPC stub for :meth:`ClusterControllerClient.diagnose_cluster`. - Gets the resource representation for a cluster in a project. + Gets cluster diagnostic information. The returned + ``Operation.metadata`` will be + `ClusterOperationMetadata `__. + After the operation completes, ``Operation.response`` contains + ``Empty``. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["cluster_controller_stub"].GetCluster + return self._stubs["cluster_controller_stub"].DiagnoseCluster @property - def list_clusters(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.list_clusters`. + def get_cluster(self): + """Return the gRPC stub for :meth:`ClusterControllerClient.get_cluster`. - Lists all regions/{region}/clusters in a project alphabetically. + Gets the resource representation for a cluster in a project. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["cluster_controller_stub"].ListClusters + return self._stubs["cluster_controller_stub"].GetCluster @property - def diagnose_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.diagnose_cluster`. + def list_clusters(self): + """Return the gRPC stub for :meth:`ClusterControllerClient.list_clusters`. - Gets cluster diagnostic information. The returned - ``Operation.metadata`` will be - `ClusterOperationMetadata `__. - After the operation completes, ``Operation.response`` contains - ``Empty``. + Lists all regions/{region}/clusters in a project alphabetically. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["cluster_controller_stub"].DiagnoseCluster + return self._stubs["cluster_controller_stub"].ListClusters diff --git a/google/cloud/dataproc_v1beta2/gapic/transports/job_controller_grpc_transport.py b/google/cloud/dataproc_v1beta2/gapic/transports/job_controller_grpc_transport.py index 5babc364..8b941307 100644 --- a/google/cloud/dataproc_v1beta2/gapic/transports/job_controller_grpc_transport.py +++ b/google/cloud/dataproc_v1beta2/gapic/transports/job_controller_grpc_transport.py @@ -128,6 +128,19 @@ def submit_job(self): """ return self._stubs["job_controller_stub"].SubmitJob + @property + def submit_job_as_operation(self): + """Return the gRPC stub for :meth:`JobControllerClient.submit_job_as_operation`. + + Submits job to a cluster. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["job_controller_stub"].SubmitJobAsOperation + @property def get_job(self): """Return the gRPC stub for :meth:`JobControllerClient.get_job`. @@ -197,16 +210,3 @@ def delete_job(self): deserialized response object. """ return self._stubs["job_controller_stub"].DeleteJob - - @property - def submit_job_as_operation(self): - """Return the gRPC stub for :meth:`JobControllerClient.submit_job_as_operation`. - - Submits job to a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["job_controller_stub"].SubmitJobAsOperation diff --git a/google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py b/google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py index 3bb89db0..d2738246 100644 --- a/google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py +++ b/google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py @@ -117,35 +117,6 @@ def channel(self): """ return self._channel - @property - def create_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.create_workflow_template`. - - Creates new workflow template. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].CreateWorkflowTemplate - - @property - def get_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.get_workflow_template`. - - Retrieves the latest workflow template. - - Can retrieve previously instantiated template by specifying optional - version parameter. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].GetWorkflowTemplate - @property def instantiate_workflow_template(self): """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.instantiate_workflow_template`. @@ -208,6 +179,35 @@ def instantiate_inline_workflow_template(self): "workflow_template_service_stub" ].InstantiateInlineWorkflowTemplate + @property + def create_workflow_template(self): + """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.create_workflow_template`. + + Creates new workflow template. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["workflow_template_service_stub"].CreateWorkflowTemplate + + @property + def get_workflow_template(self): + """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.get_workflow_template`. + + Retrieves the latest workflow template. + + Can retrieve previously instantiated template by specifying optional + version parameter. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["workflow_template_service_stub"].GetWorkflowTemplate + @property def update_workflow_template(self): """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.update_workflow_template`. diff --git a/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py b/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py index 59a02bdb..b77b32e5 100644 --- a/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py +++ b/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py @@ -230,183 +230,6 @@ def __init__( self._inner_api_calls = {} # Service calls - def create_workflow_template( - self, - parent, - template, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates new workflow template. 
- - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() - >>> - >>> parent = client.region_path('[PROJECT]', '[REGION]') - >>> - >>> # TODO: Initialize `template`: - >>> template = {} - >>> - >>> response = client.create_workflow_template(parent, template) - - Args: - parent (str): Required. The resource name of the region or location, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,create``, the resource name - of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.create``, the resource - name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - template (Union[dict, ~google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): Required. The Dataproc workflow template to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "create_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_workflow_template, - default_retry=self._method_configs["CreateWorkflowTemplate"].retry, - default_timeout=self._method_configs["CreateWorkflowTemplate"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.CreateWorkflowTemplateRequest( - parent=parent, template=template, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_workflow_template( - self, - name, - version=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Retrieves the latest workflow template. - - Can retrieve previously instantiated template by specifying optional - version parameter. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() - >>> - >>> # TODO: Initialize `name`: - >>> name = '' - >>> - >>> response = client.get_workflow_template(name) - - Args: - name (str): Required. 
The resource name of the workflow template, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.get``, the resource name of - the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.get``, the resource name - of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): Optional. The version of workflow template to retrieve. Only previously - instantiated versions can be retrieved. - - If unspecified, retrieves the current version. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "get_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_workflow_template, - default_retry=self._method_configs["GetWorkflowTemplate"].retry, - default_timeout=self._method_configs["GetWorkflowTemplate"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.GetWorkflowTemplateRequest( - name=name, version=version, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - def instantiate_workflow_template( self, name, @@ -687,6 +510,183 @@ def instantiate_inline_workflow_template( metadata_type=workflow_templates_pb2.WorkflowMetadata, ) + def create_workflow_template( + self, + parent, + template, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Creates new workflow template. + + Example: + >>> from google.cloud import dataproc_v1beta2 + >>> + >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() + >>> + >>> parent = client.region_path('[PROJECT]', '[REGION]') + >>> + >>> # TODO: Initialize `template`: + >>> template = {} + >>> + >>> response = client.create_workflow_template(parent, template) + + Args: + parent (str): Required. The resource name of the region or location, as described + in https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.workflowTemplates,create``, the resource name + of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, the resource + name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + template (Union[dict, ~google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): Required. The Dataproc workflow template to create. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "create_workflow_template" not in self._inner_api_calls: + self._inner_api_calls[ + "create_workflow_template" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_workflow_template, + default_retry=self._method_configs["CreateWorkflowTemplate"].retry, + default_timeout=self._method_configs["CreateWorkflowTemplate"].timeout, + client_info=self._client_info, + ) + + request = workflow_templates_pb2.CreateWorkflowTemplateRequest( + parent=parent, template=template, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["create_workflow_template"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def get_workflow_template( + self, + name, + version=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Retrieves the latest workflow template. + + Can retrieve previously instantiated template by specifying optional + version parameter. + + Example: + >>> from google.cloud import dataproc_v1beta2 + >>> + >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() + >>> + >>> # TODO: Initialize `name`: + >>> name = '' + >>> + >>> response = client.get_workflow_template(name) + + Args: + name (str): Required. The resource name of the workflow template, as described + in https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the resource name of + the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the resource name + of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): Optional. 
The version of workflow template to retrieve. Only previously + instantiated versions can be retrieved. + + If unspecified, retrieves the current version. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "get_workflow_template" not in self._inner_api_calls: + self._inner_api_calls[ + "get_workflow_template" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_workflow_template, + default_retry=self._method_configs["GetWorkflowTemplate"].retry, + default_timeout=self._method_configs["GetWorkflowTemplate"].timeout, + client_info=self._client_info, + ) + + request = workflow_templates_pb2.GetWorkflowTemplateRequest( + name=name, version=version, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_workflow_template"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + def update_workflow_template( self, template, diff --git a/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client_config.py b/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client_config.py index e9e1031a..b086ceb1 100644 --- a/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client_config.py +++ b/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client_config.py @@ -2,55 +2,78 @@ "interfaces": { "google.cloud.dataproc.v1beta2.WorkflowTemplateService": { "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"], - "non_idempotent": ["UNAVAILABLE"], + "retry_policy_6_codes": ["UNAVAILABLE"], + "no_retry_codes": [], + "retry_policy_7_codes": [ + "DEADLINE_EXCEEDED", + "INTERNAL", + "UNAVAILABLE", + ], }, "retry_params": { - "default": { + "retry_policy_6_params": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, + "initial_rpc_timeout_millis": 600000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, + "max_rpc_timeout_millis": 600000, "total_timeout_millis": 600000, - } - }, - "methods": { - "CreateWorkflowTemplate": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", }, - "GetWorkflowTemplate": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_policy_7_params": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 600000, + 
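# The naming convention here mirrors the idempotency split: retry_policy_7_codes
# (DEADLINE_EXCEEDED, INTERNAL, UNAVAILABLE) backs the read-only methods below
# (GetWorkflowTemplate, ListWorkflowTemplates), while retry_policy_6_codes
# (UNAVAILABLE only) backs the mutating calls, which are not safe to replay
# blindly. A sketch of resolving the effective codes for one method, assuming
# the module-level name `config` used by these *_client_config.py files:
#
#   iface = config["interfaces"][
#       "google.cloud.dataproc.v1beta2.WorkflowTemplateService"
#   ]
#   codes_name = iface["methods"]["GetWorkflowTemplate"]["retry_codes_name"]
#   assert iface["retry_codes"][codes_name] == [
#       "DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE",
#   ]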
"rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 600000, + "total_timeout_millis": 600000, }, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + }, + "methods": { "InstantiateWorkflowTemplate": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_6_codes", + "retry_params_name": "retry_policy_6_params", }, "InstantiateInlineWorkflowTemplate": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_6_codes", + "retry_params_name": "retry_policy_6_params", + }, + "CreateWorkflowTemplate": { + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_6_codes", + "retry_params_name": "retry_policy_6_params", + }, + "GetWorkflowTemplate": { + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_7_codes", + "retry_params_name": "retry_policy_7_params", }, "UpdateWorkflowTemplate": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_6_codes", + "retry_params_name": "retry_policy_6_params", }, "ListWorkflowTemplates": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_7_codes", + "retry_params_name": "retry_policy_7_params", }, "DeleteWorkflowTemplate": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 600000, + "retry_codes_name": "retry_policy_6_codes", + "retry_params_name": "retry_policy_6_params", }, }, } diff --git a/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2.py b/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2.py index 15bb4b01..ac8b00ac 100644 --- a/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2.py +++ b/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/google/cloud/dataproc_v1beta2/proto/clusters_pb2.py b/google/cloud/dataproc_v1beta2/proto/clusters_pb2.py index 83a52396..7e0a1364 100644 --- a/google/cloud/dataproc_v1beta2/proto/clusters_pb2.py +++ b/google/cloud/dataproc_v1beta2/proto/clusters_pb2.py @@ -2,7 +2,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/dataproc_v1beta2/proto/clusters.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/google/cloud/dataproc_v1beta2/proto/jobs_pb2.py b/google/cloud/dataproc_v1beta2/proto/jobs_pb2.py index 20bfb62c..c8affffc 100644 --- a/google/cloud/dataproc_v1beta2/proto/jobs_pb2.py +++ b/google/cloud/dataproc_v1beta2/proto/jobs_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/dataproc_v1beta2/proto/jobs.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/google/cloud/dataproc_v1beta2/proto/operations_pb2.py b/google/cloud/dataproc_v1beta2/proto/operations_pb2.py index 4f40b6da..a4187389 100644 --- a/google/cloud/dataproc_v1beta2/proto/operations_pb2.py +++ b/google/cloud/dataproc_v1beta2/proto/operations_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/dataproc_v1beta2/proto/operations.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/google/cloud/dataproc_v1beta2/proto/shared_pb2.py b/google/cloud/dataproc_v1beta2/proto/shared_pb2.py index f2b188de..136a7be9 100644 --- a/google/cloud/dataproc_v1beta2/proto/shared_pb2.py +++ b/google/cloud/dataproc_v1beta2/proto/shared_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/dataproc_v1beta2/proto/shared.proto -"""Generated protocol buffer code.""" + from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message diff --git a/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2.py b/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2.py index 86539014..4f61fcb6 100644 --- a/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2.py +++ b/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
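# Note on the *_pb2.py hunks in this patch: the only change to each generated
# module is the dropped docstring line, most likely a side effect of
# regenerating with the newer synthtool pin recorded in synth.metadata below;
# the descriptors and message classes are untouched. For example, the request
# messages the tests construct later still import and build the same way
# (placeholder values):
#
#   from google.cloud.dataproc_v1beta2.proto import clusters_pb2
#
#   request = clusters_pb2.DiagnoseClusterRequest(
#       project_id="my-project-id",
#       region="us-central1",
#       cluster_name="my-cluster",
#   )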
# source: google/cloud/dataproc_v1beta2/proto/workflow_templates.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/synth.metadata b/synth.metadata index 6f5d8e77..f2ff1e3d 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,23 +3,23 @@ { "git": { "name": ".", - "remote": "https://github.com/googleapis/python-dataproc.git", - "sha": "9b025ab62582e9c85d744159534e32ed63f2dce2" + "remote": "git@github.com:googleapis/python-dataproc.git", + "sha": "f0c8897e5124a553fb66ef20d9cd55d2ed912a6a" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "5824c4db2d0a3cb5485cb7f4b68cbc2598758d2d", - "internalRef": "318081240" + "sha": "bad4b831900d70e69b5e4d43bd7565d0aaded997", + "internalRef": "321584556" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "303271797a360f8a439203413f13a160f2f5b3b4" + "sha": "21f1470ecd01424dc91c70f1a7c798e4e87d1eec" } } ], diff --git a/tests/unit/gapic/v1/test_autoscaling_policy_service_client_v1.py b/tests/unit/gapic/v1/test_autoscaling_policy_service_client_v1.py index 17622a06..83736be4 100644 --- a/tests/unit/gapic/v1/test_autoscaling_policy_service_client_v1.py +++ b/tests/unit/gapic/v1/test_autoscaling_policy_service_client_v1.py @@ -61,7 +61,7 @@ class CustomException(Exception): class TestAutoscalingPolicyServiceClient(object): - def test_update_autoscaling_policy(self): + def test_create_autoscaling_policy(self): # Setup Expected Response id_ = "id3355" name = "name3373707" @@ -78,19 +78,20 @@ def test_update_autoscaling_policy(self): client = dataproc_v1.AutoscalingPolicyServiceClient() # Setup Request + parent = client.region_path("[PROJECT]", "[REGION]") policy = {} - response = client.update_autoscaling_policy(policy) + response = client.create_autoscaling_policy(parent, policy) assert expected_response == response assert len(channel.requests) == 1 - expected_request = autoscaling_policies_pb2.UpdateAutoscalingPolicyRequest( - policy=policy + expected_request = autoscaling_policies_pb2.CreateAutoscalingPolicyRequest( + parent=parent, policy=policy ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_update_autoscaling_policy_exception(self): + def test_create_autoscaling_policy_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") @@ -99,12 +100,13 @@ def test_update_autoscaling_policy_exception(self): client = dataproc_v1.AutoscalingPolicyServiceClient() # Setup request + parent = client.region_path("[PROJECT]", "[REGION]") policy = {} with pytest.raises(CustomException): - client.update_autoscaling_policy(policy) + client.create_autoscaling_policy(parent, policy) - def test_create_autoscaling_policy(self): + def test_update_autoscaling_policy(self): # Setup Expected Response id_ = "id3355" name = "name3373707" @@ -121,20 +123,19 @@ def test_create_autoscaling_policy(self): client = dataproc_v1.AutoscalingPolicyServiceClient() # Setup Request - parent = client.region_path("[PROJECT]", "[REGION]") policy = {} - response = client.create_autoscaling_policy(parent, policy) + response = client.update_autoscaling_policy(policy) assert expected_response == response assert len(channel.requests) == 1 - expected_request = 
autoscaling_policies_pb2.CreateAutoscalingPolicyRequest( - parent=parent, policy=policy + expected_request = autoscaling_policies_pb2.UpdateAutoscalingPolicyRequest( + policy=policy ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_create_autoscaling_policy_exception(self): + def test_update_autoscaling_policy_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") @@ -143,11 +144,10 @@ def test_create_autoscaling_policy_exception(self): client = dataproc_v1.AutoscalingPolicyServiceClient() # Setup request - parent = client.region_path("[PROJECT]", "[REGION]") policy = {} with pytest.raises(CustomException): - client.create_autoscaling_policy(parent, policy) + client.update_autoscaling_policy(policy) def test_get_autoscaling_policy(self): # Setup Expected Response diff --git a/tests/unit/gapic/v1/test_cluster_controller_client_v1.py b/tests/unit/gapic/v1/test_cluster_controller_client_v1.py index 1c15fdcf..81591382 100644 --- a/tests/unit/gapic/v1/test_cluster_controller_client_v1.py +++ b/tests/unit/gapic/v1/test_cluster_controller_client_v1.py @@ -259,6 +259,63 @@ def test_delete_cluster_exception(self): exception = response.exception() assert exception.errors[0] == error + def test_diagnose_cluster(self): + # Setup Expected Response + output_uri = "outputUri-1273518802" + expected_response = {"output_uri": output_uri} + expected_response = clusters_pb2.DiagnoseClusterResults(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_diagnose_cluster", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1.ClusterControllerClient() + + # Setup Request + project_id = "projectId-1969970175" + region = "region-934795532" + cluster_name = "clusterName-1018081872" + + response = client.diagnose_cluster(project_id, region, cluster_name) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = clusters_pb2.DiagnoseClusterRequest( + project_id=project_id, region=region, cluster_name=cluster_name + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_diagnose_cluster_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_diagnose_cluster_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1.ClusterControllerClient() + + # Setup Request + project_id = "projectId-1969970175" + region = "region-934795532" + cluster_name = "clusterName-1018081872" + + response = client.diagnose_cluster(project_id, region, cluster_name) + exception = response.exception() + assert exception.errors[0] == error + def test_get_cluster(self): # Setup Expected Response project_id_2 = "projectId2939242356" @@ -355,59 +412,3 @@ def test_list_clusters_exception(self): paged_list_response = client.list_clusters(project_id, region) with pytest.raises(CustomException): list(paged_list_response) 
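The relocated v1 diagnose_cluster test above also carries this change's type
fix: the long-running operation's response now unpacks as
``clusters_pb2.DiagnoseClusterResults`` (exposing ``output_uri``) rather than
``empty_pb2.Empty``, which is why the superseded copy of the test removed
below still builds an ``Empty``. A minimal usage sketch against a live
cluster (all identifiers are placeholders):

    >>> from google.cloud import dataproc_v1
    >>>
    >>> client = dataproc_v1.ClusterControllerClient()
    >>> operation = client.diagnose_cluster('my-project-id', 'us-central1', 'my-cluster')
    >>> results = operation.result()
    >>> results.output_uri  # Cloud Storage URI of the diagnostic output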
- - def test_diagnose_cluster(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_diagnose_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.diagnose_cluster(project_id, region, cluster_name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.DiagnoseClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_diagnose_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_diagnose_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.diagnose_cluster(project_id, region, cluster_name) - exception = response.exception() - assert exception.errors[0] == error diff --git a/tests/unit/gapic/v1/test_job_controller_client_v1.py b/tests/unit/gapic/v1/test_job_controller_client_v1.py index bc9ff0f9..4d777faf 100644 --- a/tests/unit/gapic/v1/test_job_controller_client_v1.py +++ b/tests/unit/gapic/v1/test_job_controller_client_v1.py @@ -117,6 +117,71 @@ def test_submit_job_exception(self): with pytest.raises(CustomException): client.submit_job(project_id, region, job) + def test_submit_job_as_operation(self): + # Setup Expected Response + driver_output_resource_uri = "driverOutputResourceUri-542229086" + driver_control_files_uri = "driverControlFilesUri207057643" + job_uuid = "jobUuid-1615012099" + done = True + expected_response = { + "driver_output_resource_uri": driver_output_resource_uri, + "driver_control_files_uri": driver_control_files_uri, + "job_uuid": job_uuid, + "done": done, + } + expected_response = jobs_pb2.Job(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_submit_job_as_operation", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1.JobControllerClient() + + # Setup Request + project_id = "projectId-1969970175" + region = "region-934795532" + job = {} + + response = client.submit_job_as_operation(project_id, region, job) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = jobs_pb2.SubmitJobRequest( + project_id=project_id, region=region, job=job + ) + actual_request = 
channel.requests[0][1] + assert expected_request == actual_request + + def test_submit_job_as_operation_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_submit_job_as_operation_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1.JobControllerClient() + + # Setup Request + project_id = "projectId-1969970175" + region = "region-934795532" + job = {} + + response = client.submit_job_as_operation(project_id, region, job) + exception = response.exception() + assert exception.errors[0] == error + def test_get_job(self): # Setup Expected Response driver_output_resource_uri = "driverOutputResourceUri-542229086" @@ -364,68 +429,3 @@ def test_delete_job_exception(self): with pytest.raises(CustomException): client.delete_job(project_id, region, job_id) - - def test_submit_job_as_operation(self): - # Setup Expected Response - driver_output_resource_uri = "driverOutputResourceUri-542229086" - driver_control_files_uri = "driverControlFilesUri207057643" - job_uuid = "jobUuid-1615012099" - done = True - expected_response = { - "driver_output_resource_uri": driver_output_resource_uri, - "driver_control_files_uri": driver_control_files_uri, - "job_uuid": job_uuid, - "done": done, - } - expected_response = jobs_pb2.Job(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_submit_job_as_operation", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job = {} - - response = client.submit_job_as_operation(project_id, region, job) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.SubmitJobRequest( - project_id=project_id, region=region, job=job - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_submit_job_as_operation_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_submit_job_as_operation_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job = {} - - response = client.submit_job_as_operation(project_id, region, job) - exception = response.exception() - assert exception.errors[0] == error diff --git a/tests/unit/gapic/v1/test_workflow_template_service_client_v1.py b/tests/unit/gapic/v1/test_workflow_template_service_client_v1.py index 764e38fb..c63831ca 100644 --- a/tests/unit/gapic/v1/test_workflow_template_service_client_v1.py +++ b/tests/unit/gapic/v1/test_workflow_template_service_client_v1.py @@ -64,90 +64,6 @@ class 
CustomException(Exception): class TestWorkflowTemplateServiceClient(object): - def test_create_workflow_template(self): - # Setup Expected Response - id_ = "id3355" - name = "name3373707" - version = 351608024 - expected_response = {"id": id_, "name": name, "version": version} - expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup Request - parent = client.region_path("[PROJECT]", "[REGION]") - template = {} - - response = client.create_workflow_template(parent, template) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = workflow_templates_pb2.CreateWorkflowTemplateRequest( - parent=parent, template=template - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_workflow_template_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup request - parent = client.region_path("[PROJECT]", "[REGION]") - template = {} - - with pytest.raises(CustomException): - client.create_workflow_template(parent, template) - - def test_get_workflow_template(self): - # Setup Expected Response - id_ = "id3355" - name_2 = "name2-1052831874" - version = 351608024 - expected_response = {"id": id_, "name": name_2, "version": version} - expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup Request - name = "name3373707" - - response = client.get_workflow_template(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = workflow_templates_pb2.GetWorkflowTemplateRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_workflow_template_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup request - name = "name3373707" - - with pytest.raises(CustomException): - client.get_workflow_template(name) - def test_instantiate_workflow_template(self): # Setup Expected Response expected_response = {} @@ -255,6 +171,90 @@ def test_instantiate_inline_workflow_template_exception(self): exception = response.exception() assert exception.errors[0] == error + def test_create_workflow_template(self): + # Setup Expected Response + id_ = "id3355" + name = "name3373707" + version = 351608024 + expected_response = {"id": id_, "name": name, "version": version} + expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response) + + # Mock the API response + channel = 
ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1.WorkflowTemplateServiceClient() + + # Setup Request + parent = client.region_path("[PROJECT]", "[REGION]") + template = {} + + response = client.create_workflow_template(parent, template) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = workflow_templates_pb2.CreateWorkflowTemplateRequest( + parent=parent, template=template + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_workflow_template_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1.WorkflowTemplateServiceClient() + + # Setup request + parent = client.region_path("[PROJECT]", "[REGION]") + template = {} + + with pytest.raises(CustomException): + client.create_workflow_template(parent, template) + + def test_get_workflow_template(self): + # Setup Expected Response + id_ = "id3355" + name_2 = "name2-1052831874" + version = 351608024 + expected_response = {"id": id_, "name": name_2, "version": version} + expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1.WorkflowTemplateServiceClient() + + # Setup Request + name = "name3373707" + + response = client.get_workflow_template(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = workflow_templates_pb2.GetWorkflowTemplateRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_workflow_template_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1.WorkflowTemplateServiceClient() + + # Setup request + name = "name3373707" + + with pytest.raises(CustomException): + client.get_workflow_template(name) + def test_update_workflow_template(self): # Setup Expected Response id_ = "id3355" diff --git a/tests/unit/gapic/v1beta2/test_cluster_controller_client_v1beta2.py b/tests/unit/gapic/v1beta2/test_cluster_controller_client_v1beta2.py index 7c75dc57..cb4d14ad 100644 --- a/tests/unit/gapic/v1beta2/test_cluster_controller_client_v1beta2.py +++ b/tests/unit/gapic/v1beta2/test_cluster_controller_client_v1beta2.py @@ -259,6 +259,62 @@ def test_delete_cluster_exception(self): exception = response.exception() assert exception.errors[0] == error + def test_diagnose_cluster(self): + # Setup Expected Response + expected_response = {} + expected_response = empty_pb2.Empty(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_diagnose_cluster", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + 
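# Same harness as every test in these modules: patching
# google.api_core.grpc_helpers.create_channel swaps in the ChannelStub, which
# replays the canned `operation` above and records each outgoing request so
# the test can compare it against the expected protobuf at the end.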
create_channel.return_value = channel + client = dataproc_v1beta2.ClusterControllerClient() + + # Setup Request + project_id = "projectId-1969970175" + region = "region-934795532" + cluster_name = "clusterName-1018081872" + + response = client.diagnose_cluster(project_id, region, cluster_name) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = clusters_pb2.DiagnoseClusterRequest( + project_id=project_id, region=region, cluster_name=cluster_name + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_diagnose_cluster_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_diagnose_cluster_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1beta2.ClusterControllerClient() + + # Setup Request + project_id = "projectId-1969970175" + region = "region-934795532" + cluster_name = "clusterName-1018081872" + + response = client.diagnose_cluster(project_id, region, cluster_name) + exception = response.exception() + assert exception.errors[0] == error + def test_get_cluster(self): # Setup Expected Response project_id_2 = "projectId2939242356" @@ -355,59 +411,3 @@ def test_list_clusters_exception(self): paged_list_response = client.list_clusters(project_id, region) with pytest.raises(CustomException): list(paged_list_response) - - def test_diagnose_cluster(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_diagnose_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.diagnose_cluster(project_id, region, cluster_name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.DiagnoseClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_diagnose_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_diagnose_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.diagnose_cluster(project_id, region, cluster_name) - exception = response.exception() - assert exception.errors[0] == error diff --git 
a/tests/unit/gapic/v1beta2/test_job_controller_client_v1beta2.py b/tests/unit/gapic/v1beta2/test_job_controller_client_v1beta2.py index 84ce3d3d..57dbcbe8 100644 --- a/tests/unit/gapic/v1beta2/test_job_controller_client_v1beta2.py +++ b/tests/unit/gapic/v1beta2/test_job_controller_client_v1beta2.py @@ -119,6 +119,73 @@ def test_submit_job_exception(self): with pytest.raises(CustomException): client.submit_job(project_id, region, job) + def test_submit_job_as_operation(self): + # Setup Expected Response + submitted_by = "submittedBy-2047729125" + driver_output_resource_uri = "driverOutputResourceUri-542229086" + driver_control_files_uri = "driverControlFilesUri207057643" + job_uuid = "jobUuid-1615012099" + done = True + expected_response = { + "submitted_by": submitted_by, + "driver_output_resource_uri": driver_output_resource_uri, + "driver_control_files_uri": driver_control_files_uri, + "job_uuid": job_uuid, + "done": done, + } + expected_response = jobs_pb2.Job(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_submit_job_as_operation", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1beta2.JobControllerClient() + + # Setup Request + project_id = "projectId-1969970175" + region = "region-934795532" + job = {} + + response = client.submit_job_as_operation(project_id, region, job) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = jobs_pb2.SubmitJobRequest( + project_id=project_id, region=region, job=job + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_submit_job_as_operation_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_submit_job_as_operation_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1beta2.JobControllerClient() + + # Setup Request + project_id = "projectId-1969970175" + region = "region-934795532" + job = {} + + response = client.submit_job_as_operation(project_id, region, job) + exception = response.exception() + assert exception.errors[0] == error + def test_get_job(self): # Setup Expected Response submitted_by = "submittedBy-2047729125" @@ -372,70 +439,3 @@ def test_delete_job_exception(self): with pytest.raises(CustomException): client.delete_job(project_id, region, job_id) - - def test_submit_job_as_operation(self): - # Setup Expected Response - submitted_by = "submittedBy-2047729125" - driver_output_resource_uri = "driverOutputResourceUri-542229086" - driver_control_files_uri = "driverControlFilesUri207057643" - job_uuid = "jobUuid-1615012099" - done = True - expected_response = { - "submitted_by": submitted_by, - "driver_output_resource_uri": driver_output_resource_uri, - "driver_control_files_uri": driver_control_files_uri, - "job_uuid": job_uuid, - "done": done, - } - expected_response = jobs_pb2.Job(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_submit_job_as_operation", done=True - ) - 
operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job = {} - - response = client.submit_job_as_operation(project_id, region, job) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.SubmitJobRequest( - project_id=project_id, region=region, job=job - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_submit_job_as_operation_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_submit_job_as_operation_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job = {} - - response = client.submit_job_as_operation(project_id, region, job) - exception = response.exception() - assert exception.errors[0] == error diff --git a/tests/unit/gapic/v1beta2/test_workflow_template_service_client_v1beta2.py b/tests/unit/gapic/v1beta2/test_workflow_template_service_client_v1beta2.py index d3017008..f8ff56f9 100644 --- a/tests/unit/gapic/v1beta2/test_workflow_template_service_client_v1beta2.py +++ b/tests/unit/gapic/v1beta2/test_workflow_template_service_client_v1beta2.py @@ -64,90 +64,6 @@ class CustomException(Exception): class TestWorkflowTemplateServiceClient(object): - def test_create_workflow_template(self): - # Setup Expected Response - id_ = "id3355" - name = "name3373707" - version = 351608024 - expected_response = {"id": id_, "name": name, "version": version} - expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.WorkflowTemplateServiceClient() - - # Setup Request - parent = client.region_path("[PROJECT]", "[REGION]") - template = {} - - response = client.create_workflow_template(parent, template) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = workflow_templates_pb2.CreateWorkflowTemplateRequest( - parent=parent, template=template - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_workflow_template_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.WorkflowTemplateServiceClient() - - # Setup request - parent = client.region_path("[PROJECT]", "[REGION]") - template = {} - - with pytest.raises(CustomException): - client.create_workflow_template(parent, template) - - def test_get_workflow_template(self): - # 
Setup Expected Response - id_ = "id3355" - name_2 = "name2-1052831874" - version = 351608024 - expected_response = {"id": id_, "name": name_2, "version": version} - expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.WorkflowTemplateServiceClient() - - # Setup Request - name = "name3373707" - - response = client.get_workflow_template(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = workflow_templates_pb2.GetWorkflowTemplateRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_workflow_template_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.WorkflowTemplateServiceClient() - - # Setup request - name = "name3373707" - - with pytest.raises(CustomException): - client.get_workflow_template(name) - def test_instantiate_workflow_template(self): # Setup Expected Response expected_response = {} @@ -255,6 +171,90 @@ def test_instantiate_inline_workflow_template_exception(self): exception = response.exception() assert exception.errors[0] == error + def test_create_workflow_template(self): + # Setup Expected Response + id_ = "id3355" + name = "name3373707" + version = 351608024 + expected_response = {"id": id_, "name": name, "version": version} + expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1beta2.WorkflowTemplateServiceClient() + + # Setup Request + parent = client.region_path("[PROJECT]", "[REGION]") + template = {} + + response = client.create_workflow_template(parent, template) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = workflow_templates_pb2.CreateWorkflowTemplateRequest( + parent=parent, template=template + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_workflow_template_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1beta2.WorkflowTemplateServiceClient() + + # Setup request + parent = client.region_path("[PROJECT]", "[REGION]") + template = {} + + with pytest.raises(CustomException): + client.create_workflow_template(parent, template) + + def test_get_workflow_template(self): + # Setup Expected Response + id_ = "id3355" + name_2 = "name2-1052831874" + version = 351608024 + expected_response = {"id": id_, "name": name_2, "version": version} + expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as 
create_channel: + create_channel.return_value = channel + client = dataproc_v1beta2.WorkflowTemplateServiceClient() + + # Setup Request + name = "name3373707" + + response = client.get_workflow_template(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = workflow_templates_pb2.GetWorkflowTemplateRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_workflow_template_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = dataproc_v1beta2.WorkflowTemplateServiceClient() + + # Setup request + name = "name3373707" + + with pytest.raises(CustomException): + client.get_workflow_template(name) + def test_update_workflow_template(self): # Setup Expected Response id_ = "id3355"
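
The new `diagnose_cluster` and `submit_job_as_operation` tests above all lean on the same long-running-operation mechanics: the stubbed channel returns a `google.longrunning.Operation` whose `response` field (a protobuf `Any`) has been `Pack()`ed with the concrete result type, and the failure variants instead `CopyFrom` a `google.rpc.Status` into the operation's `error` field. A minimal sketch of the success path, outside the patch itself and assuming only `googleapis-common-protos` is installed (the operation name here is illustrative):

    # Sketch of the Operation/Any round trip the tests depend on.
    from google.longrunning import operations_pb2
    from google.protobuf import empty_pb2

    # "Server" side: pack the typed result into the finished Operation.
    expected = empty_pb2.Empty()
    operation = operations_pb2.Operation(name="operations/sketch", done=True)
    operation.response.Pack(expected)

    # "Client" side: check the payload type, then unpack the result.
    assert operation.response.Is(empty_pb2.Empty.DESCRIPTOR)
    result = empty_pb2.Empty()
    operation.response.Unpack(result)
    assert result == expected

This is why the commit's "correct protobuf type for diagnose_cluster" matters to the tests: the type packed into `operation.response` must match the type the client library unpacks, or `result()` on the returned future would fail even though the RPC itself succeeded.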