feat: add Dataproc Serverless for Spark Batches API (#290)
* feat: add Dataproc Serverless for Spark Batches API

Committer: @medb
PiperOrigin-RevId: 402631995

Source-Link: googleapis/googleapis@95af2e4

Source-Link: googleapis/googleapis-gen@0ee7abd
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMGVlN2FiZDllY2QyOTUxZTk1ODMwMzY4MWE0YjI1MWE5NDgxMDdiNiJ9

* 🦉 Updates from OwlBot

See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
gcf-owl-bot[bot] committed Oct 12, 2021
1 parent 15a4471 commit f0ed26c
Showing 13 changed files with 321 additions and 40 deletions.
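
Most of these additions are new message types backing Dataproc Serverless for Spark: RuntimeConfig, EnvironmentConfig, ExecutionConfig, PeripheralsConfig, SparkHistoryServerConfig, RuntimeInfo, and BatchOperationMetadata. As a quick orientation, here is a minimal sketch of constructing the new RuntimeConfig; the field names (version, container_image, properties) come from the v1 shared proto, which is not itself shown in this diff, so treat them as assumptions:

```python
from google.cloud import dataproc_v1

# RuntimeConfig is one of the shared types this commit adds; it describes
# the serverless Spark runtime for a batch workload. All values below are
# illustrative placeholders.
runtime = dataproc_v1.RuntimeConfig(
    version="1.0",  # assumed runtime version string
    container_image="gcr.io/my-project/custom-spark:latest",  # placeholder image
    properties={"spark.executor.cores": "4"},
)
print(runtime)
```
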
18 changes: 18 additions & 0 deletions google/cloud/dataproc/__init__.py
@@ -71,6 +71,7 @@
from google.cloud.dataproc_v1.types.clusters import ClusterConfig
from google.cloud.dataproc_v1.types.clusters import ClusterMetrics
from google.cloud.dataproc_v1.types.clusters import ClusterStatus
from google.cloud.dataproc_v1.types.clusters import ConfidentialInstanceConfig
from google.cloud.dataproc_v1.types.clusters import CreateClusterRequest
from google.cloud.dataproc_v1.types.clusters import DeleteClusterRequest
from google.cloud.dataproc_v1.types.clusters import DiagnoseClusterRequest
@@ -122,9 +123,17 @@
from google.cloud.dataproc_v1.types.jobs import SubmitJobRequest
from google.cloud.dataproc_v1.types.jobs import UpdateJobRequest
from google.cloud.dataproc_v1.types.jobs import YarnApplication
from google.cloud.dataproc_v1.types.operations import BatchOperationMetadata
from google.cloud.dataproc_v1.types.operations import ClusterOperationMetadata
from google.cloud.dataproc_v1.types.operations import ClusterOperationStatus
from google.cloud.dataproc_v1.types.shared import EnvironmentConfig
from google.cloud.dataproc_v1.types.shared import ExecutionConfig
from google.cloud.dataproc_v1.types.shared import PeripheralsConfig
from google.cloud.dataproc_v1.types.shared import RuntimeConfig
from google.cloud.dataproc_v1.types.shared import RuntimeInfo
from google.cloud.dataproc_v1.types.shared import SparkHistoryServerConfig
from google.cloud.dataproc_v1.types.shared import Component
from google.cloud.dataproc_v1.types.shared import FailureAction
from google.cloud.dataproc_v1.types.workflow_templates import ClusterOperation
from google.cloud.dataproc_v1.types.workflow_templates import ClusterSelector
from google.cloud.dataproc_v1.types.workflow_templates import (
@@ -186,6 +195,7 @@
"ClusterConfig",
"ClusterMetrics",
"ClusterStatus",
"ConfidentialInstanceConfig",
"CreateClusterRequest",
"DeleteClusterRequest",
"DiagnoseClusterRequest",
@@ -237,9 +247,17 @@
"SubmitJobRequest",
"UpdateJobRequest",
"YarnApplication",
"BatchOperationMetadata",
"ClusterOperationMetadata",
"ClusterOperationStatus",
"EnvironmentConfig",
"ExecutionConfig",
"PeripheralsConfig",
"RuntimeConfig",
"RuntimeInfo",
"SparkHistoryServerConfig",
"Component",
"FailureAction",
"ClusterOperation",
"ClusterSelector",
"CreateWorkflowTemplateRequest",
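
Because the versionless google.cloud.dataproc package above simply re-exports the dataproc_v1 types, the new names are importable from either namespace. A short sketch:

```python
# Both import paths should resolve to the same proto-plus class after this
# change, since the top-level package re-exports from dataproc_v1.
from google.cloud.dataproc import BatchOperationMetadata
from google.cloud.dataproc_v1 import BatchOperationMetadata as V1BatchOperationMetadata

assert BatchOperationMetadata is V1BatchOperationMetadata
```
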
18 changes: 18 additions & 0 deletions google/cloud/dataproc_v1/__init__.py
@@ -39,6 +39,7 @@
from .types.clusters import ClusterConfig
from .types.clusters import ClusterMetrics
from .types.clusters import ClusterStatus
from .types.clusters import ConfidentialInstanceConfig
from .types.clusters import CreateClusterRequest
from .types.clusters import DeleteClusterRequest
from .types.clusters import DiagnoseClusterRequest
@@ -90,9 +91,17 @@
from .types.jobs import SubmitJobRequest
from .types.jobs import UpdateJobRequest
from .types.jobs import YarnApplication
from .types.operations import BatchOperationMetadata
from .types.operations import ClusterOperationMetadata
from .types.operations import ClusterOperationStatus
from .types.shared import EnvironmentConfig
from .types.shared import ExecutionConfig
from .types.shared import PeripheralsConfig
from .types.shared import RuntimeConfig
from .types.shared import RuntimeInfo
from .types.shared import SparkHistoryServerConfig
from .types.shared import Component
from .types.shared import FailureAction
from .types.workflow_templates import ClusterOperation
from .types.workflow_templates import ClusterSelector
from .types.workflow_templates import CreateWorkflowTemplateRequest
@@ -126,6 +135,7 @@
"AutoscalingPolicyServiceClient",
"BasicAutoscalingAlgorithm",
"BasicYarnAutoscalingConfig",
"BatchOperationMetadata",
"CancelJobRequest",
"Cluster",
"ClusterConfig",
@@ -137,6 +147,7 @@
"ClusterSelector",
"ClusterStatus",
"Component",
"ConfidentialInstanceConfig",
"CreateAutoscalingPolicyRequest",
"CreateClusterRequest",
"CreateWorkflowTemplateRequest",
@@ -149,6 +160,9 @@
"DiskConfig",
"EncryptionConfig",
"EndpointConfig",
"EnvironmentConfig",
"ExecutionConfig",
"FailureAction",
"GceClusterConfig",
"GetAutoscalingPolicyRequest",
"GetClusterRequest",
@@ -187,15 +201,19 @@
"NodeInitializationAction",
"OrderedJob",
"ParameterValidation",
"PeripheralsConfig",
"PigJob",
"PrestoJob",
"PySparkJob",
"QueryList",
"RegexValidation",
"ReservationAffinity",
"RuntimeConfig",
"RuntimeInfo",
"SecurityConfig",
"ShieldedInstanceConfig",
"SoftwareConfig",
"SparkHistoryServerConfig",
"SparkJob",
"SparkRJob",
"SparkSqlJob",
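
The new shared types compose into one another: EnvironmentConfig groups an ExecutionConfig (how the workload executes) with a PeripheralsConfig (attached services such as a Dataproc Metastore or a Spark History Server cluster). A hedged sketch, with field names taken from the v1 shared proto rather than this diff, and every resource name a placeholder:

```python
from google.cloud import dataproc_v1

# Assumed field names: ExecutionConfig.service_account / subnetwork_uri,
# PeripheralsConfig.metastore_service / spark_history_server_config, and
# SparkHistoryServerConfig.dataproc_cluster.
env = dataproc_v1.EnvironmentConfig(
    execution_config=dataproc_v1.ExecutionConfig(
        service_account="spark-sa@my-project.iam.gserviceaccount.com",
        subnetwork_uri="projects/my-project/regions/us-central1/subnetworks/default",
    ),
    peripherals_config=dataproc_v1.PeripheralsConfig(
        metastore_service="projects/my-project/locations/us-central1/services/my-metastore",
        spark_history_server_config=dataproc_v1.SparkHistoryServerConfig(
            dataproc_cluster="projects/my-project/regions/us-central1/clusters/my-phs",
        ),
    ),
)
print(env)
```
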
google/cloud/dataproc_v1/services/cluster_controller/async_client.py
@@ -220,7 +220,7 @@ async def create_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster
"""
# Create or coerce a protobuf request object.
@@ -292,6 +292,9 @@ async def update_cluster(
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
The cluster must be in a
[``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
state or an error is returned.
Args:
request (:class:`google.cloud.dataproc_v1.types.UpdateClusterRequest`):
@@ -398,7 +401,7 @@ async def update_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster
"""
# Create or coerce a protobuf request object.
@@ -483,7 +486,7 @@ async def stop_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster
"""
# Create or coerce a protobuf request object.
@@ -535,7 +538,7 @@ async def start_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster
"""
# Create or coerce a protobuf request object.
@@ -727,7 +730,7 @@ async def get_cluster(
google.cloud.dataproc_v1.types.Cluster:
Describes the identifying
information, config, and status of a
- cluster of Compute Engine instances.
+ Dataproc cluster
"""
# Create or coerce a protobuf request object.
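
The docstring additions above make a behavioral contract explicit: update_cluster errors unless the cluster is in a RUNNING state. A sketch of honoring that precondition with the async client; the project, region, and cluster names are placeholders:

```python
import asyncio

from google.cloud import dataproc_v1


async def resize_if_running(project_id: str, region: str, cluster_name: str) -> None:
    # Cluster operations outside the "global" region use a regional endpoint.
    client = dataproc_v1.ClusterControllerAsyncClient(
        client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"}
    )

    cluster = await client.get_cluster(
        project_id=project_id, region=region, cluster_name=cluster_name
    )
    # Per the updated docs, update_cluster requires a RUNNING cluster.
    if cluster.status.state != dataproc_v1.ClusterStatus.State.RUNNING:
        raise RuntimeError(f"{cluster_name} is not RUNNING; refusing to update")

    operation = await client.update_cluster(
        project_id=project_id,
        region=region,
        cluster_name=cluster_name,
        cluster={"config": {"worker_config": {"num_instances": 4}}},
        update_mask={"paths": ["config.worker_config.num_instances"]},
    )
    updated = await operation.result()  # the LRO resolves to a Cluster
    print(updated.cluster_name, updated.status.state)


# asyncio.run(resize_if_running("my-project", "us-central1", "my-cluster"))
```
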
13 changes: 8 additions & 5 deletions google/cloud/dataproc_v1/services/cluster_controller/client.py
@@ -422,7 +422,7 @@ def create_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster
"""
# Create or coerce a protobuf request object.
@@ -485,6 +485,9 @@ def update_cluster(
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
The cluster must be in a
[``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
state or an error is returned.
Args:
request (Union[google.cloud.dataproc_v1.types.UpdateClusterRequest, dict]):
@@ -591,7 +594,7 @@ def update_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster
"""
# Create or coerce a protobuf request object.
@@ -667,7 +670,7 @@ def stop_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster
"""
# Create or coerce a protobuf request object.
@@ -720,7 +723,7 @@ def start_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster
"""
# Create or coerce a protobuf request object.
@@ -904,7 +907,7 @@ def get_cluster(
google.cloud.dataproc_v1.types.Cluster:
Describes the identifying
information, config, and status of a
- cluster of Compute Engine instances.
+ Dataproc cluster
"""
# Create or coerce a protobuf request object.
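
stop_cluster and start_cluster return long-running operations whose result is the Cluster message described above. A small synchronous sketch of the stop/start round trip; StopCluster and StartCluster appear to take only a request message (no flattened parameters), so dict requests are used, and all names are placeholders:

```python
from google.cloud import dataproc_v1

region = "us-central1"  # placeholder region
client = dataproc_v1.ClusterControllerClient(
    client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"}
)

request = {
    "project_id": "my-project",    # placeholder project
    "region": region,
    "cluster_name": "my-cluster",  # placeholder cluster
}

# Each call returns an Operation; .result() blocks until the operation
# resolves to the Cluster message.
stopped = client.stop_cluster(request=request).result()
print("stopped:", stopped.status.state)

started = client.start_cluster(request=request).result()
print("started:", started.status.state)
```
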
google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py
@@ -282,6 +282,9 @@ def update_cluster(
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
The cluster must be in a
[``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
state or an error is returned.
Returns:
Callable[[~.UpdateClusterRequest],
google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py
@@ -287,6 +287,9 @@ def update_cluster(
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
The cluster must be in a
[``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
state or an error is returned.
Returns:
Callable[[~.UpdateClusterRequest],
21 changes: 21 additions & 0 deletions google/cloud/dataproc_v1/types/__init__.py
@@ -32,6 +32,7 @@
ClusterConfig,
ClusterMetrics,
ClusterStatus,
ConfidentialInstanceConfig,
CreateClusterRequest,
DeleteClusterRequest,
DiagnoseClusterRequest,
@@ -87,9 +88,20 @@
YarnApplication,
)
from .operations import (
BatchOperationMetadata,
ClusterOperationMetadata,
ClusterOperationStatus,
)
from .shared import (
EnvironmentConfig,
ExecutionConfig,
PeripheralsConfig,
RuntimeConfig,
RuntimeInfo,
SparkHistoryServerConfig,
Component,
FailureAction,
)
from .workflow_templates import (
ClusterOperation,
ClusterSelector,
@@ -131,6 +143,7 @@
"ClusterConfig",
"ClusterMetrics",
"ClusterStatus",
"ConfidentialInstanceConfig",
"CreateClusterRequest",
"DeleteClusterRequest",
"DiagnoseClusterRequest",
@@ -182,9 +195,17 @@
"SubmitJobRequest",
"UpdateJobRequest",
"YarnApplication",
"BatchOperationMetadata",
"ClusterOperationMetadata",
"ClusterOperationStatus",
"EnvironmentConfig",
"ExecutionConfig",
"PeripheralsConfig",
"RuntimeConfig",
"RuntimeInfo",
"SparkHistoryServerConfig",
"Component",
"FailureAction",
"ClusterOperation",
"ClusterSelector",
"CreateWorkflowTemplateRequest",
10 changes: 10 additions & 0 deletions google/cloud/dataproc_v1/types/autoscaling_policies.py
@@ -67,6 +67,15 @@ class AutoscalingPolicy(proto.Message):
secondary_worker_config (google.cloud.dataproc_v1.types.InstanceGroupAutoscalingPolicyConfig):
Optional. Describes how the autoscaler will
operate for secondary workers.
labels (Sequence[google.cloud.dataproc_v1.types.AutoscalingPolicy.LabelsEntry]):
Optional. The labels to associate with this autoscaling
policy. Label **keys** must contain 1 to 63 characters, and
must conform to `RFC
1035 <https://www.ietf.org/rfc/rfc1035.txt>`__. Label
**values** may be empty, but, if present, must contain 1 to
63 characters, and must conform to `RFC
1035 <https://www.ietf.org/rfc/rfc1035.txt>`__. No more than
32 labels can be associated with an autoscaling policy.
"""

id = proto.Field(proto.STRING, number=1,)
@@ -80,6 +89,7 @@ class AutoscalingPolicy(proto.Message):
secondary_worker_config = proto.Field(
proto.MESSAGE, number=5, message="InstanceGroupAutoscalingPolicyConfig",
)
labels = proto.MapField(proto.STRING, proto.STRING, number=6,)


class BasicAutoscalingAlgorithm(proto.Message):
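
The new labels map on AutoscalingPolicy is set like any other proto map field. A sketch with placeholder names; the non-label fields follow the existing autoscaling types:

```python
from google.cloud import dataproc_v1

policy = dataproc_v1.AutoscalingPolicy(
    id="my-policy",  # placeholder policy id
    basic_algorithm=dataproc_v1.BasicAutoscalingAlgorithm(
        yarn_config=dataproc_v1.BasicYarnAutoscalingConfig(
            scale_up_factor=0.5,
            scale_down_factor=0.5,
        ),
    ),
    worker_config=dataproc_v1.InstanceGroupAutoscalingPolicyConfig(max_instances=10),
    # New in this change: at most 32 labels; keys must be 1 to 63 characters
    # and conform to RFC 1035, and values may be empty or 1 to 63 characters.
    labels={"env": "dev", "team": "data-platform"},
)
print(dict(policy.labels))
```
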