From 8cbdde71016f910ac3da67bc9eb2aedbb7b0c870 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Fri, 9 Apr 2021 02:02:42 -0700 Subject: [PATCH] feat: update the Dataproc V1 API client library This includes the following: 1. The new start and stop cluster methods. 2. The ability to specify a metastore config in a cluster. 3. The ability to specify a (BETA) GKE cluster when creating a Dataproc cluster. 4. The ability to configure the behavior for private IPv6 cluster networking. 5. The ability to specify node affinity groups for clusters. 6. The ability to specify shielded VM configurations for clusters. 7. Support for service-account based secure multi-tenancy. 8. The ability to specify cluster labels for picking which cluster should run a job. 9. Components for DOCKER, DRUID, FLINK, HBASE, RANGER, and SOLR 10. The ability to specify a DAG timeout for workflows. Committer: @ojarjur PiperOrigin-RevId: 367518225 Source-Author: Google APIs Source-Date: Thu Apr 8 15:22:23 2021 -0700 Source-Repo: googleapis/googleapis Source-Sha: 439f098cb730af18c8ae4e65971ea3badf45138a Source-Link: https://github.com/googleapis/googleapis/commit/439f098cb730af18c8ae4e65971ea3badf45138a --- .../dataproc/v1/ClusterControllerClient.java | 166 ++ .../v1/ClusterControllerSettings.java | 44 + .../v1/WorkflowTemplateServiceClient.java | 4 +- .../cloud/dataproc/v1/gapic_metadata.json | 6 + .../v1/stub/ClusterControllerStub.java | 20 + .../stub/ClusterControllerStubSettings.java | 140 ++ .../v1/stub/GrpcClusterControllerStub.java | 95 + .../v1/ClusterControllerClientTest.java | 142 ++ .../v1/MockClusterControllerImpl.java | 41 + .../v1/WorkflowTemplateServiceClientTest.java | 7 + .../dataproc/v1/ClusterControllerGrpc.java | 231 ++- .../v1/WorkflowTemplateServiceGrpc.java | 12 +- .../cloud/dataproc/v1/ClusterConfig.java | 668 +++++++ .../dataproc/v1/ClusterConfigOrBuilder.java | 99 + .../google/cloud/dataproc/v1/ClusterName.java | 223 +++ .../cloud/dataproc/v1/ClusterStatus.java | 66 + 
.../cloud/dataproc/v1/ClustersProto.java | 642 ++++--- .../google/cloud/dataproc/v1/Component.java | 145 +- .../dataproc/v1/CreateClusterRequest.java | 42 +- .../v1/CreateClusterRequestOrBuilder.java | 12 +- .../dataproc/v1/DeleteClusterRequest.java | 35 +- .../v1/DeleteClusterRequestOrBuilder.java | 10 +- .../google/cloud/dataproc/v1/DiskConfig.java | 42 +- .../dataproc/v1/DiskConfigOrBuilder.java | 12 +- .../cloud/dataproc/v1/GceClusterConfig.java | 1025 +++++++++- .../v1/GceClusterConfigOrBuilder.java | 115 ++ .../cloud/dataproc/v1/GkeClusterConfig.java | 1690 +++++++++++++++++ .../v1/GkeClusterConfigOrBuilder.java | 68 + .../cloud/dataproc/v1/IdentityConfig.java | 829 ++++++++ .../dataproc/v1/IdentityConfigOrBuilder.java | 90 + .../dataproc/v1/InstanceGroupConfig.java | 24 +- .../v1/InstanceGroupConfigOrBuilder.java | 6 +- .../InstantiateWorkflowTemplateRequest.java | 22 +- ...tiateWorkflowTemplateRequestOrBuilder.java | 10 +- .../com/google/cloud/dataproc/v1/Job.java | 16 +- .../cloud/dataproc/v1/JobOrBuilder.java | 4 +- .../cloud/dataproc/v1/JobPlacement.java | 356 ++++ .../dataproc/v1/JobPlacementOrBuilder.java | 64 + .../cloud/dataproc/v1/JobReference.java | 28 +- .../dataproc/v1/JobReferenceOrBuilder.java | 8 +- .../cloud/dataproc/v1/JobScheduling.java | 106 +- .../dataproc/v1/JobSchedulingOrBuilder.java | 17 +- .../google/cloud/dataproc/v1/JobsProto.java | 264 +-- .../cloud/dataproc/v1/KerberosConfig.java | 56 +- .../dataproc/v1/KerberosConfigOrBuilder.java | 16 +- .../cloud/dataproc/v1/LifecycleConfig.java | 48 +- .../dataproc/v1/LifecycleConfigOrBuilder.java | 12 +- .../cloud/dataproc/v1/MetastoreConfig.java | 663 +++++++ .../dataproc/v1/MetastoreConfigOrBuilder.java | 58 + .../cloud/dataproc/v1/NodeGroupAffinity.java | 683 +++++++ .../v1/NodeGroupAffinityOrBuilder.java | 64 + .../google/cloud/dataproc/v1/OrderedJob.java | 28 +- .../dataproc/v1/OrderedJobOrBuilder.java | 8 +- .../google/cloud/dataproc/v1/QueryList.java | 78 +- 
.../cloud/dataproc/v1/QueryListOrBuilder.java | 24 +- .../cloud/dataproc/v1/SecurityConfig.java | 390 +++- .../dataproc/v1/SecurityConfigOrBuilder.java | 62 +- .../google/cloud/dataproc/v1/ServiceName.java | 223 +++ .../google/cloud/dataproc/v1/SharedProto.java | 19 +- .../dataproc/v1/ShieldedInstanceConfig.java | 728 +++++++ .../v1/ShieldedInstanceConfigOrBuilder.java | 64 + .../dataproc/v1/StartClusterRequest.java | 1428 ++++++++++++++ .../v1/StartClusterRequestOrBuilder.java | 172 ++ .../cloud/dataproc/v1/StopClusterRequest.java | 1428 ++++++++++++++ .../v1/StopClusterRequestOrBuilder.java | 172 ++ .../cloud/dataproc/v1/SubmitJobRequest.java | 35 +- .../v1/SubmitJobRequestOrBuilder.java | 10 +- .../cloud/dataproc/v1/TemplateParameter.java | 104 +- .../v1/TemplateParameterOrBuilder.java | 32 +- .../dataproc/v1/UpdateClusterRequest.java | 35 +- .../v1/UpdateClusterRequestOrBuilder.java | 10 +- .../cloud/dataproc/v1/WorkflowMetadata.java | 961 ++++++++++ .../v1/WorkflowMetadataOrBuilder.java | 138 ++ .../cloud/dataproc/v1/WorkflowTemplate.java | 392 ++++ .../v1/WorkflowTemplateOrBuilder.java | 65 + .../dataproc/v1/WorkflowTemplatesProto.java | 378 ++-- .../google/cloud/dataproc/v1/clusters.proto | 272 ++- .../proto/google/cloud/dataproc/v1/jobs.proto | 70 +- .../google/cloud/dataproc/v1/shared.proto | 26 +- .../dataproc/v1/workflow_templates.proto | 110 +- synth.metadata | 26 +- 81 files changed, 15700 insertions(+), 1034 deletions(-) create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterName.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfig.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfigOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/IdentityConfig.java create mode 100644 
proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/IdentityConfigOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/MetastoreConfig.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/MetastoreConfigOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeGroupAffinity.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeGroupAffinityOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ServiceName.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ShieldedInstanceConfig.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ShieldedInstanceConfigOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StartClusterRequest.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StartClusterRequestOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StopClusterRequest.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StopClusterRequestOrBuilder.java diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerClient.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerClient.java index 022ba23f..87cb4c5f 100644 --- a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerClient.java +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerClient.java @@ -448,6 +448,172 @@ public final UnaryCallable updateClusterCallabl return stub.updateClusterCallable(); } + // AUTO-GENERATED 
DOCUMENTATION AND METHOD. + /** + * Stops a cluster in a project. + * + *

Sample code: + * + *

{@code
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   StopClusterRequest request =
+   *       StopClusterRequest.newBuilder()
+   *           .setProjectId("projectId-894832108")
+   *           .setRegion("region-934795532")
+   *           .setClusterName("clusterName-1141738587")
+   *           .setClusterUuid("clusterUuid-1141510955")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   Cluster response = clusterControllerClient.stopClusterAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture stopClusterAsync( + StopClusterRequest request) { + return stopClusterOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Stops a cluster in a project. + * + *

Sample code: + * + *

{@code
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   StopClusterRequest request =
+   *       StopClusterRequest.newBuilder()
+   *           .setProjectId("projectId-894832108")
+   *           .setRegion("region-934795532")
+   *           .setClusterName("clusterName-1141738587")
+   *           .setClusterUuid("clusterUuid-1141510955")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   OperationFuture future =
+   *       clusterControllerClient.stopClusterOperationCallable().futureCall(request);
+   *   // Do something.
+   *   Cluster response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable + stopClusterOperationCallable() { + return stub.stopClusterOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Stops a cluster in a project. + * + *

Sample code: + * + *

{@code
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   StopClusterRequest request =
+   *       StopClusterRequest.newBuilder()
+   *           .setProjectId("projectId-894832108")
+   *           .setRegion("region-934795532")
+   *           .setClusterName("clusterName-1141738587")
+   *           .setClusterUuid("clusterUuid-1141510955")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       clusterControllerClient.stopClusterCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable stopClusterCallable() { + return stub.stopClusterCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts a cluster in a project. + * + *

Sample code: + * + *

{@code
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   StartClusterRequest request =
+   *       StartClusterRequest.newBuilder()
+   *           .setProjectId("projectId-894832108")
+   *           .setRegion("region-934795532")
+   *           .setClusterName("clusterName-1141738587")
+   *           .setClusterUuid("clusterUuid-1141510955")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   Cluster response = clusterControllerClient.startClusterAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture startClusterAsync( + StartClusterRequest request) { + return startClusterOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts a cluster in a project. + * + *

Sample code: + * + *

{@code
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   StartClusterRequest request =
+   *       StartClusterRequest.newBuilder()
+   *           .setProjectId("projectId-894832108")
+   *           .setRegion("region-934795532")
+   *           .setClusterName("clusterName-1141738587")
+   *           .setClusterUuid("clusterUuid-1141510955")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   OperationFuture future =
+   *       clusterControllerClient.startClusterOperationCallable().futureCall(request);
+   *   // Do something.
+   *   Cluster response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable + startClusterOperationCallable() { + return stub.startClusterOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts a cluster in a project. + * + *

Sample code: + * + *

{@code
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   StartClusterRequest request =
+   *       StartClusterRequest.newBuilder()
+   *           .setProjectId("projectId-894832108")
+   *           .setRegion("region-934795532")
+   *           .setClusterName("clusterName-1141738587")
+   *           .setClusterUuid("clusterUuid-1141510955")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture future =
+   *       clusterControllerClient.startClusterCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable startClusterCallable() { + return stub.startClusterCallable(); + } + // AUTO-GENERATED DOCUMENTATION AND METHOD. /** * Deletes a cluster in a project. The returned diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerSettings.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerSettings.java index e4d40160..27f7760d 100644 --- a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerSettings.java +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerSettings.java @@ -94,6 +94,28 @@ public UnaryCallSettings updateClusterSettings( return ((ClusterControllerStubSettings) getStubSettings()).updateClusterOperationSettings(); } + /** Returns the object with the settings used for calls to stopCluster. */ + public UnaryCallSettings stopClusterSettings() { + return ((ClusterControllerStubSettings) getStubSettings()).stopClusterSettings(); + } + + /** Returns the object with the settings used for calls to stopCluster. */ + public OperationCallSettings + stopClusterOperationSettings() { + return ((ClusterControllerStubSettings) getStubSettings()).stopClusterOperationSettings(); + } + + /** Returns the object with the settings used for calls to startCluster. */ + public UnaryCallSettings startClusterSettings() { + return ((ClusterControllerStubSettings) getStubSettings()).startClusterSettings(); + } + + /** Returns the object with the settings used for calls to startCluster. */ + public OperationCallSettings + startClusterOperationSettings() { + return ((ClusterControllerStubSettings) getStubSettings()).startClusterOperationSettings(); + } + /** Returns the object with the settings used for calls to deleteCluster. 
*/ public UnaryCallSettings deleteClusterSettings() { return ((ClusterControllerStubSettings) getStubSettings()).deleteClusterSettings(); @@ -248,6 +270,28 @@ public UnaryCallSettings.Builder updateClusterS return getStubSettingsBuilder().updateClusterOperationSettings(); } + /** Returns the builder for the settings used for calls to stopCluster. */ + public UnaryCallSettings.Builder stopClusterSettings() { + return getStubSettingsBuilder().stopClusterSettings(); + } + + /** Returns the builder for the settings used for calls to stopCluster. */ + public OperationCallSettings.Builder + stopClusterOperationSettings() { + return getStubSettingsBuilder().stopClusterOperationSettings(); + } + + /** Returns the builder for the settings used for calls to startCluster. */ + public UnaryCallSettings.Builder startClusterSettings() { + return getStubSettingsBuilder().startClusterSettings(); + } + + /** Returns the builder for the settings used for calls to startCluster. */ + public OperationCallSettings.Builder + startClusterOperationSettings() { + return getStubSettingsBuilder().startClusterOperationSettings(); + } + /** Returns the builder for the settings used for calls to deleteCluster. */ public UnaryCallSettings.Builder deleteClusterSettings() { return getStubSettingsBuilder().deleteClusterSettings(); diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceClient.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceClient.java index d45c399e..71b74c57 100644 --- a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceClient.java +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceClient.java @@ -647,7 +647,7 @@ public final OperationFuture instantiateWorkflowTemplat * * * @param parameters Optional. Map from parameter names to values that should be used for those - * parameters. 
Values may not exceed 100 characters. + * parameters. Values may not exceed 1000 characters. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ public final OperationFuture instantiateWorkflowTemplateAsync( @@ -708,7 +708,7 @@ public final OperationFuture instantiateWorkflowTemplat * * * @param parameters Optional. Map from parameter names to values that should be used for those - * parameters. Values may not exceed 100 characters. + * parameters. Values may not exceed 1000 characters. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ public final OperationFuture instantiateWorkflowTemplateAsync( diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/gapic_metadata.json b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/gapic_metadata.json index 845a440e..1b98e6d0 100644 --- a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/gapic_metadata.json +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/gapic_metadata.json @@ -49,6 +49,12 @@ "ListClusters": { "methods": ["listClusters", "listClusters", "listClusters", "listClustersPagedCallable", "listClustersCallable"] }, + "StartCluster": { + "methods": ["startClusterAsync", "startClusterOperationCallable", "startClusterCallable"] + }, + "StopCluster": { + "methods": ["stopClusterAsync", "stopClusterOperationCallable", "stopClusterCallable"] + }, "UpdateCluster": { "methods": ["updateClusterAsync", "updateClusterAsync", "updateClusterOperationCallable", "updateClusterCallable"] } diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/ClusterControllerStub.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/ClusterControllerStub.java index f75e4fbd..2cae1ff5 100644 --- a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/ClusterControllerStub.java +++ 
b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/ClusterControllerStub.java @@ -30,6 +30,8 @@ import com.google.cloud.dataproc.v1.GetClusterRequest; import com.google.cloud.dataproc.v1.ListClustersRequest; import com.google.cloud.dataproc.v1.ListClustersResponse; +import com.google.cloud.dataproc.v1.StartClusterRequest; +import com.google.cloud.dataproc.v1.StopClusterRequest; import com.google.cloud.dataproc.v1.UpdateClusterRequest; import com.google.longrunning.Operation; import com.google.longrunning.stub.OperationsStub; @@ -67,6 +69,24 @@ public UnaryCallable updateClusterCallable() { throw new UnsupportedOperationException("Not implemented: updateClusterCallable()"); } + public OperationCallable + stopClusterOperationCallable() { + throw new UnsupportedOperationException("Not implemented: stopClusterOperationCallable()"); + } + + public UnaryCallable stopClusterCallable() { + throw new UnsupportedOperationException("Not implemented: stopClusterCallable()"); + } + + public OperationCallable + startClusterOperationCallable() { + throw new UnsupportedOperationException("Not implemented: startClusterOperationCallable()"); + } + + public UnaryCallable startClusterCallable() { + throw new UnsupportedOperationException("Not implemented: startClusterCallable()"); + } + public OperationCallable deleteClusterOperationCallable() { throw new UnsupportedOperationException("Not implemented: deleteClusterOperationCallable()"); diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/ClusterControllerStubSettings.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/ClusterControllerStubSettings.java index ee7a93e4..e379310b 100644 --- a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/ClusterControllerStubSettings.java +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/ClusterControllerStubSettings.java @@ -53,6 +53,8 @@ import 
com.google.cloud.dataproc.v1.GetClusterRequest; import com.google.cloud.dataproc.v1.ListClustersRequest; import com.google.cloud.dataproc.v1.ListClustersResponse; +import com.google.cloud.dataproc.v1.StartClusterRequest; +import com.google.cloud.dataproc.v1.StopClusterRequest; import com.google.cloud.dataproc.v1.UpdateClusterRequest; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -110,6 +112,12 @@ public class ClusterControllerStubSettings extends StubSettings updateClusterSettings; private final OperationCallSettings updateClusterOperationSettings; + private final UnaryCallSettings stopClusterSettings; + private final OperationCallSettings + stopClusterOperationSettings; + private final UnaryCallSettings startClusterSettings; + private final OperationCallSettings + startClusterOperationSettings; private final UnaryCallSettings deleteClusterSettings; private final OperationCallSettings deleteClusterOperationSettings; @@ -197,6 +205,28 @@ public UnaryCallSettings updateClusterSettings( return updateClusterOperationSettings; } + /** Returns the object with the settings used for calls to stopCluster. */ + public UnaryCallSettings stopClusterSettings() { + return stopClusterSettings; + } + + /** Returns the object with the settings used for calls to stopCluster. */ + public OperationCallSettings + stopClusterOperationSettings() { + return stopClusterOperationSettings; + } + + /** Returns the object with the settings used for calls to startCluster. */ + public UnaryCallSettings startClusterSettings() { + return startClusterSettings; + } + + /** Returns the object with the settings used for calls to startCluster. */ + public OperationCallSettings + startClusterOperationSettings() { + return startClusterOperationSettings; + } + /** Returns the object with the settings used for calls to deleteCluster. 
*/ public UnaryCallSettings deleteClusterSettings() { return deleteClusterSettings; @@ -304,6 +334,10 @@ protected ClusterControllerStubSettings(Builder settingsBuilder) throws IOExcept createClusterOperationSettings = settingsBuilder.createClusterOperationSettings().build(); updateClusterSettings = settingsBuilder.updateClusterSettings().build(); updateClusterOperationSettings = settingsBuilder.updateClusterOperationSettings().build(); + stopClusterSettings = settingsBuilder.stopClusterSettings().build(); + stopClusterOperationSettings = settingsBuilder.stopClusterOperationSettings().build(); + startClusterSettings = settingsBuilder.startClusterSettings().build(); + startClusterOperationSettings = settingsBuilder.startClusterOperationSettings().build(); deleteClusterSettings = settingsBuilder.deleteClusterSettings().build(); deleteClusterOperationSettings = settingsBuilder.deleteClusterOperationSettings().build(); getClusterSettings = settingsBuilder.getClusterSettings().build(); @@ -323,6 +357,14 @@ public static class Builder extends StubSettings.Builder updateClusterOperationSettings; + private final UnaryCallSettings.Builder stopClusterSettings; + private final OperationCallSettings.Builder< + StopClusterRequest, Cluster, ClusterOperationMetadata> + stopClusterOperationSettings; + private final UnaryCallSettings.Builder startClusterSettings; + private final OperationCallSettings.Builder< + StartClusterRequest, Cluster, ClusterOperationMetadata> + startClusterOperationSettings; private final UnaryCallSettings.Builder deleteClusterSettings; private final OperationCallSettings.Builder< DeleteClusterRequest, Empty, ClusterOperationMetadata> @@ -345,6 +387,7 @@ public static class Builder extends StubSettings.BuildernewArrayList(StatusCode.Code.UNAVAILABLE))); + definitions.put("no_retry_codes", ImmutableSet.copyOf(Lists.newArrayList())); definitions.put( "retry_policy_6_codes", ImmutableSet.copyOf( @@ -371,6 +414,8 @@ public static class Builder extends 
StubSettings.Builder>of( createClusterSettings, updateClusterSettings, + stopClusterSettings, + startClusterSettings, deleteClusterSettings, getClusterSettings, listClustersSettings, @@ -421,6 +472,10 @@ protected Builder(ClusterControllerStubSettings settings) { createClusterOperationSettings = settings.createClusterOperationSettings.toBuilder(); updateClusterSettings = settings.updateClusterSettings.toBuilder(); updateClusterOperationSettings = settings.updateClusterOperationSettings.toBuilder(); + stopClusterSettings = settings.stopClusterSettings.toBuilder(); + stopClusterOperationSettings = settings.stopClusterOperationSettings.toBuilder(); + startClusterSettings = settings.startClusterSettings.toBuilder(); + startClusterOperationSettings = settings.startClusterOperationSettings.toBuilder(); deleteClusterSettings = settings.deleteClusterSettings.toBuilder(); deleteClusterOperationSettings = settings.deleteClusterOperationSettings.toBuilder(); getClusterSettings = settings.getClusterSettings.toBuilder(); @@ -432,6 +487,8 @@ protected Builder(ClusterControllerStubSettings settings) { ImmutableList.>of( createClusterSettings, updateClusterSettings, + stopClusterSettings, + startClusterSettings, deleteClusterSettings, getClusterSettings, listClustersSettings, @@ -460,6 +517,16 @@ private static Builder initDefaults(Builder builder) { .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_5_codes")) .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_5_params")); + builder + .stopClusterSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .startClusterSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + builder .deleteClusterSettings() .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_5_codes")) @@ -528,6 +595,53 @@ private 
static Builder initDefaults(Builder builder) { .setTotalTimeout(Duration.ofMillis(900000L)) .build())); + builder + .stopClusterOperationSettings() + .setInitialCallSettings( + UnaryCallSettings.newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Cluster.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(ClusterOperationMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelay(Duration.ofMillis(45000L)) + .setInitialRpcTimeout(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ZERO) + .setTotalTimeout(Duration.ofMillis(300000L)) + .build())); + + builder + .startClusterOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Cluster.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(ClusterOperationMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelay(Duration.ofMillis(45000L)) + .setInitialRpcTimeout(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ZERO) + .setTotalTimeout(Duration.ofMillis(300000L)) + .build())); + builder .deleteClusterOperationSettings() .setInitialCallSettings( @@ -621,6 +735,32 @@ public UnaryCallSettings.Builder updateClusterS 
return updateClusterOperationSettings; } + /** Returns the builder for the settings used for calls to stopCluster. */ + public UnaryCallSettings.Builder stopClusterSettings() { + return stopClusterSettings; + } + + /** Returns the builder for the settings used for calls to stopCluster. */ + @BetaApi( + "The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallSettings.Builder + stopClusterOperationSettings() { + return stopClusterOperationSettings; + } + + /** Returns the builder for the settings used for calls to startCluster. */ + public UnaryCallSettings.Builder startClusterSettings() { + return startClusterSettings; + } + + /** Returns the builder for the settings used for calls to startCluster. */ + @BetaApi( + "The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallSettings.Builder + startClusterOperationSettings() { + return startClusterOperationSettings; + } + /** Returns the builder for the settings used for calls to deleteCluster. 
*/ public UnaryCallSettings.Builder deleteClusterSettings() { return deleteClusterSettings; diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/GrpcClusterControllerStub.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/GrpcClusterControllerStub.java index 687b1896..4d93d320 100644 --- a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/GrpcClusterControllerStub.java +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/GrpcClusterControllerStub.java @@ -35,6 +35,8 @@ import com.google.cloud.dataproc.v1.GetClusterRequest; import com.google.cloud.dataproc.v1.ListClustersRequest; import com.google.cloud.dataproc.v1.ListClustersResponse; +import com.google.cloud.dataproc.v1.StartClusterRequest; +import com.google.cloud.dataproc.v1.StopClusterRequest; import com.google.cloud.dataproc.v1.UpdateClusterRequest; import com.google.common.collect.ImmutableMap; import com.google.longrunning.Operation; @@ -75,6 +77,23 @@ public class GrpcClusterControllerStub extends ClusterControllerStub { .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) .build(); + private static final MethodDescriptor stopClusterMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1.ClusterController/StopCluster") + .setRequestMarshaller(ProtoUtils.marshaller(StopClusterRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + startClusterMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1.ClusterController/StartCluster") + .setRequestMarshaller(ProtoUtils.marshaller(StartClusterRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + 
.build(); + private static final MethodDescriptor deleteClusterMethodDescriptor = MethodDescriptor.newBuilder() @@ -119,6 +138,12 @@ public class GrpcClusterControllerStub extends ClusterControllerStub { private final UnaryCallable updateClusterCallable; private final OperationCallable updateClusterOperationCallable; + private final UnaryCallable stopClusterCallable; + private final OperationCallable + stopClusterOperationCallable; + private final UnaryCallable startClusterCallable; + private final OperationCallable + startClusterOperationCallable; private final UnaryCallable deleteClusterCallable; private final OperationCallable deleteClusterOperationCallable; @@ -204,6 +229,36 @@ public Map extract(UpdateClusterRequest request) { } }) .build(); + GrpcCallSettings stopClusterTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(stopClusterMethodDescriptor) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract(StopClusterRequest request) { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("cluster_name", String.valueOf(request.getClusterName())); + params.put("project_id", String.valueOf(request.getProjectId())); + params.put("region", String.valueOf(request.getRegion())); + return params.build(); + } + }) + .build(); + GrpcCallSettings startClusterTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(startClusterMethodDescriptor) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract(StartClusterRequest request) { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("cluster_name", String.valueOf(request.getClusterName())); + params.put("project_id", String.valueOf(request.getProjectId())); + params.put("region", String.valueOf(request.getRegion())); + return params.build(); + } + }) + .build(); GrpcCallSettings deleteClusterTransportSettings = GrpcCallSettings.newBuilder() 
.setMethodDescriptor(deleteClusterMethodDescriptor) @@ -282,6 +337,24 @@ public Map extract(DiagnoseClusterRequest request) { settings.updateClusterOperationSettings(), clientContext, operationsStub); + this.stopClusterCallable = + callableFactory.createUnaryCallable( + stopClusterTransportSettings, settings.stopClusterSettings(), clientContext); + this.stopClusterOperationCallable = + callableFactory.createOperationCallable( + stopClusterTransportSettings, + settings.stopClusterOperationSettings(), + clientContext, + operationsStub); + this.startClusterCallable = + callableFactory.createUnaryCallable( + startClusterTransportSettings, settings.startClusterSettings(), clientContext); + this.startClusterOperationCallable = + callableFactory.createOperationCallable( + startClusterTransportSettings, + settings.startClusterOperationSettings(), + clientContext, + operationsStub); this.deleteClusterCallable = callableFactory.createUnaryCallable( deleteClusterTransportSettings, settings.deleteClusterSettings(), clientContext); @@ -340,6 +413,28 @@ public UnaryCallable updateClusterCallable() { return updateClusterOperationCallable; } + @Override + public UnaryCallable stopClusterCallable() { + return stopClusterCallable; + } + + @Override + public OperationCallable + stopClusterOperationCallable() { + return stopClusterOperationCallable; + } + + @Override + public UnaryCallable startClusterCallable() { + return startClusterCallable; + } + + @Override + public OperationCallable + startClusterOperationCallable() { + return startClusterOperationCallable; + } + @Override public UnaryCallable deleteClusterCallable() { return deleteClusterCallable; diff --git a/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/ClusterControllerClientTest.java b/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/ClusterControllerClientTest.java index b43be30a..4de43fa7 100644 --- 
a/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/ClusterControllerClientTest.java +++ b/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/ClusterControllerClientTest.java @@ -211,6 +211,148 @@ public void updateClusterExceptionTest() throws Exception { } } + @Test + public void stopClusterTest() throws Exception { + Cluster expectedResponse = + Cluster.newBuilder() + .setProjectId("projectId-894832108") + .setClusterName("clusterName-1141738587") + .setConfig(ClusterConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .setStatus(ClusterStatus.newBuilder().build()) + .addAllStatusHistory(new ArrayList()) + .setClusterUuid("clusterUuid-1141510955") + .setMetrics(ClusterMetrics.newBuilder().build()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("stopClusterTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockClusterController.addResponse(resultOperation); + + StopClusterRequest request = + StopClusterRequest.newBuilder() + .setProjectId("projectId-894832108") + .setRegion("region-934795532") + .setClusterName("clusterName-1141738587") + .setClusterUuid("clusterUuid-1141510955") + .setRequestId("requestId693933066") + .build(); + + Cluster actualResponse = client.stopClusterAsync(request).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockClusterController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + StopClusterRequest actualRequest = ((StopClusterRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getProjectId(), actualRequest.getProjectId()); + Assert.assertEquals(request.getRegion(), actualRequest.getRegion()); + Assert.assertEquals(request.getClusterName(), actualRequest.getClusterName()); + Assert.assertEquals(request.getClusterUuid(), actualRequest.getClusterUuid()); + Assert.assertEquals(request.getRequestId(), actualRequest.getRequestId()); + Assert.assertTrue( + 
channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void stopClusterExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockClusterController.addException(exception); + + try { + StopClusterRequest request = + StopClusterRequest.newBuilder() + .setProjectId("projectId-894832108") + .setRegion("region-934795532") + .setClusterName("clusterName-1141738587") + .setClusterUuid("clusterUuid-1141510955") + .setRequestId("requestId693933066") + .build(); + client.stopClusterAsync(request).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void startClusterTest() throws Exception { + Cluster expectedResponse = + Cluster.newBuilder() + .setProjectId("projectId-894832108") + .setClusterName("clusterName-1141738587") + .setConfig(ClusterConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .setStatus(ClusterStatus.newBuilder().build()) + .addAllStatusHistory(new ArrayList()) + .setClusterUuid("clusterUuid-1141510955") + .setMetrics(ClusterMetrics.newBuilder().build()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("startClusterTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockClusterController.addResponse(resultOperation); + + StartClusterRequest request = + StartClusterRequest.newBuilder() + .setProjectId("projectId-894832108") + .setRegion("region-934795532") + .setClusterName("clusterName-1141738587") + .setClusterUuid("clusterUuid-1141510955") + .setRequestId("requestId693933066") + 
.build(); + + Cluster actualResponse = client.startClusterAsync(request).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockClusterController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + StartClusterRequest actualRequest = ((StartClusterRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getProjectId(), actualRequest.getProjectId()); + Assert.assertEquals(request.getRegion(), actualRequest.getRegion()); + Assert.assertEquals(request.getClusterName(), actualRequest.getClusterName()); + Assert.assertEquals(request.getClusterUuid(), actualRequest.getClusterUuid()); + Assert.assertEquals(request.getRequestId(), actualRequest.getRequestId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void startClusterExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockClusterController.addException(exception); + + try { + StartClusterRequest request = + StartClusterRequest.newBuilder() + .setProjectId("projectId-894832108") + .setRegion("region-934795532") + .setClusterName("clusterName-1141738587") + .setClusterUuid("clusterUuid-1141510955") + .setRequestId("requestId693933066") + .build(); + client.startClusterAsync(request).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + @Test public void deleteClusterTest() throws Exception { Empty expectedResponse = Empty.newBuilder().build(); diff --git 
a/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/MockClusterControllerImpl.java b/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/MockClusterControllerImpl.java index 0d3eb918..90f5bdc3 100644 --- a/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/MockClusterControllerImpl.java +++ b/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/MockClusterControllerImpl.java @@ -101,6 +101,47 @@ public void updateCluster( } } + @Override + public void stopCluster(StopClusterRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method StopCluster, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void startCluster( + StartClusterRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method StartCluster, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + @Override public void deleteCluster( DeleteClusterRequest request, StreamObserver responseObserver) { diff --git a/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceClientTest.java b/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceClientTest.java index ec172781..26e26daf 100644 --- a/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceClientTest.java +++ b/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceClientTest.java @@ -30,6 +30,7 @@ import com.google.longrunning.Operation; import com.google.protobuf.AbstractMessage; import com.google.protobuf.Any; +import com.google.protobuf.Duration; import com.google.protobuf.Empty; import com.google.protobuf.Timestamp; import io.grpc.StatusRuntimeException; @@ -104,6 +105,7 @@ public void createWorkflowTemplateTest() throws Exception { .setPlacement(WorkflowTemplatePlacement.newBuilder().build()) .addAllJobs(new ArrayList()) .addAllParameters(new ArrayList()) + .setDagTimeout(Duration.newBuilder().build()) .build(); mockWorkflowTemplateService.addResponse(expectedResponse); @@ -157,6 +159,7 @@ public void createWorkflowTemplateTest2() throws Exception { .setPlacement(WorkflowTemplatePlacement.newBuilder().build()) .addAllJobs(new ArrayList()) .addAllParameters(new ArrayList()) + .setDagTimeout(Duration.newBuilder().build()) .build(); mockWorkflowTemplateService.addResponse(expectedResponse); @@ -210,6 +213,7 @@ public void createWorkflowTemplateTest3() throws Exception { .setPlacement(WorkflowTemplatePlacement.newBuilder().build()) .addAllJobs(new ArrayList()) .addAllParameters(new ArrayList()) + .setDagTimeout(Duration.newBuilder().build()) .build(); mockWorkflowTemplateService.addResponse(expectedResponse); @@ -263,6 +267,7 @@ public void 
getWorkflowTemplateTest() throws Exception { .setPlacement(WorkflowTemplatePlacement.newBuilder().build()) .addAllJobs(new ArrayList()) .addAllParameters(new ArrayList()) + .setDagTimeout(Duration.newBuilder().build()) .build(); mockWorkflowTemplateService.addResponse(expectedResponse); @@ -316,6 +321,7 @@ public void getWorkflowTemplateTest2() throws Exception { .setPlacement(WorkflowTemplatePlacement.newBuilder().build()) .addAllJobs(new ArrayList()) .addAllParameters(new ArrayList()) + .setDagTimeout(Duration.newBuilder().build()) .build(); mockWorkflowTemplateService.addResponse(expectedResponse); @@ -689,6 +695,7 @@ public void updateWorkflowTemplateTest() throws Exception { .setPlacement(WorkflowTemplatePlacement.newBuilder().build()) .addAllJobs(new ArrayList()) .addAllParameters(new ArrayList()) + .setDagTimeout(Duration.newBuilder().build()) .build(); mockWorkflowTemplateService.addResponse(expectedResponse); diff --git a/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerGrpc.java b/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerGrpc.java index 5da6ef58..4c78be81 100644 --- a/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerGrpc.java +++ b/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerGrpc.java @@ -121,6 +121,91 @@ private ClusterControllerGrpc() {} return getUpdateClusterMethod; } + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.StopClusterRequest, com.google.longrunning.Operation> + getStopClusterMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "StopCluster", + requestType = com.google.cloud.dataproc.v1.StopClusterRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + 
com.google.cloud.dataproc.v1.StopClusterRequest, com.google.longrunning.Operation> + getStopClusterMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.StopClusterRequest, com.google.longrunning.Operation> + getStopClusterMethod; + if ((getStopClusterMethod = ClusterControllerGrpc.getStopClusterMethod) == null) { + synchronized (ClusterControllerGrpc.class) { + if ((getStopClusterMethod = ClusterControllerGrpc.getStopClusterMethod) == null) { + ClusterControllerGrpc.getStopClusterMethod = + getStopClusterMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "StopCluster")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1.StopClusterRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new ClusterControllerMethodDescriptorSupplier("StopCluster")) + .build(); + } + } + } + return getStopClusterMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.StartClusterRequest, com.google.longrunning.Operation> + getStartClusterMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "StartCluster", + requestType = com.google.cloud.dataproc.v1.StartClusterRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.StartClusterRequest, com.google.longrunning.Operation> + getStartClusterMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.StartClusterRequest, com.google.longrunning.Operation> + getStartClusterMethod; + if ((getStartClusterMethod = ClusterControllerGrpc.getStartClusterMethod) == null) { + 
synchronized (ClusterControllerGrpc.class) { + if ((getStartClusterMethod = ClusterControllerGrpc.getStartClusterMethod) == null) { + ClusterControllerGrpc.getStartClusterMethod = + getStartClusterMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "StartCluster")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1.StartClusterRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new ClusterControllerMethodDescriptorSupplier("StartCluster")) + .build(); + } + } + } + return getStartClusterMethod; + } + private static volatile io.grpc.MethodDescriptor< com.google.cloud.dataproc.v1.DeleteClusterRequest, com.google.longrunning.Operation> getDeleteClusterMethod; @@ -379,6 +464,34 @@ public void updateCluster( getUpdateClusterMethod(), responseObserver); } + /** + * + * + *
+     * Stops a cluster in a project.
+     * 
+ */ + public void stopCluster( + com.google.cloud.dataproc.v1.StopClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getStopClusterMethod(), responseObserver); + } + + /** + * + * + *
+     * Starts a cluster in a project.
+     * 
+ */ + public void startCluster( + com.google.cloud.dataproc.v1.StartClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getStartClusterMethod(), responseObserver); + } + /** * * @@ -458,6 +571,18 @@ public final io.grpc.ServerServiceDefinition bindService() { new MethodHandlers< com.google.cloud.dataproc.v1.UpdateClusterRequest, com.google.longrunning.Operation>(this, METHODID_UPDATE_CLUSTER))) + .addMethod( + getStopClusterMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1.StopClusterRequest, + com.google.longrunning.Operation>(this, METHODID_STOP_CLUSTER))) + .addMethod( + getStartClusterMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1.StartClusterRequest, + com.google.longrunning.Operation>(this, METHODID_START_CLUSTER))) .addMethod( getDeleteClusterMethod(), io.grpc.stub.ServerCalls.asyncUnaryCall( @@ -543,6 +668,38 @@ public void updateCluster( responseObserver); } + /** + * + * + *
+     * Stops a cluster in a project.
+     * 
+ */ + public void stopCluster( + com.google.cloud.dataproc.v1.StopClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getStopClusterMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Starts a cluster in a project.
+     * 
+ */ + public void startCluster( + com.google.cloud.dataproc.v1.StartClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getStartClusterMethod(), getCallOptions()), + request, + responseObserver); + } + /** * * @@ -666,6 +823,32 @@ public com.google.longrunning.Operation updateCluster( getChannel(), getUpdateClusterMethod(), getCallOptions(), request); } + /** + * + * + *
+     * Stops a cluster in a project.
+     * 
+ */ + public com.google.longrunning.Operation stopCluster( + com.google.cloud.dataproc.v1.StopClusterRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getStopClusterMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Starts a cluster in a project.
+     * 
+ */ + public com.google.longrunning.Operation startCluster( + com.google.cloud.dataproc.v1.StartClusterRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getStartClusterMethod(), getCallOptions(), request); + } + /** * * @@ -777,6 +960,32 @@ protected ClusterControllerFutureStub build( getChannel().newCall(getUpdateClusterMethod(), getCallOptions()), request); } + /** + * + * + *
+     * Stops a cluster in a project.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + stopCluster(com.google.cloud.dataproc.v1.StopClusterRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getStopClusterMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Starts a cluster in a project.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + startCluster(com.google.cloud.dataproc.v1.StartClusterRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getStartClusterMethod(), getCallOptions()), request); + } + /** * * @@ -841,10 +1050,12 @@ protected ClusterControllerFutureStub build( private static final int METHODID_CREATE_CLUSTER = 0; private static final int METHODID_UPDATE_CLUSTER = 1; - private static final int METHODID_DELETE_CLUSTER = 2; - private static final int METHODID_GET_CLUSTER = 3; - private static final int METHODID_LIST_CLUSTERS = 4; - private static final int METHODID_DIAGNOSE_CLUSTER = 5; + private static final int METHODID_STOP_CLUSTER = 2; + private static final int METHODID_START_CLUSTER = 3; + private static final int METHODID_DELETE_CLUSTER = 4; + private static final int METHODID_GET_CLUSTER = 5; + private static final int METHODID_LIST_CLUSTERS = 6; + private static final int METHODID_DIAGNOSE_CLUSTER = 7; private static final class MethodHandlers implements io.grpc.stub.ServerCalls.UnaryMethod, @@ -873,6 +1084,16 @@ public void invoke(Req request, io.grpc.stub.StreamObserver responseObserv (com.google.cloud.dataproc.v1.UpdateClusterRequest) request, (io.grpc.stub.StreamObserver) responseObserver); break; + case METHODID_STOP_CLUSTER: + serviceImpl.stopCluster( + (com.google.cloud.dataproc.v1.StopClusterRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_START_CLUSTER: + serviceImpl.startCluster( + (com.google.cloud.dataproc.v1.StartClusterRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; case METHODID_DELETE_CLUSTER: serviceImpl.deleteCluster( (com.google.cloud.dataproc.v1.DeleteClusterRequest) request, @@ -960,6 +1181,8 @@ public static io.grpc.ServiceDescriptor getServiceDescriptor() { .setSchemaDescriptor(new ClusterControllerFileDescriptorSupplier()) .addMethod(getCreateClusterMethod()) 
.addMethod(getUpdateClusterMethod()) + .addMethod(getStopClusterMethod()) + .addMethod(getStartClusterMethod()) .addMethod(getDeleteClusterMethod()) .addMethod(getGetClusterMethod()) .addMethod(getListClustersMethod()) diff --git a/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceGrpc.java b/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceGrpc.java index 665b780e..3f284e38 100644 --- a/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceGrpc.java +++ b/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceGrpc.java @@ -514,7 +514,8 @@ public void instantiateWorkflowTemplate( *
      * Instantiates a template and begins execution.
      * This method is equivalent to executing the sequence
-     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate],
+     * [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
      * [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
      * The returned Operation can be used to track execution of
      * workflow by polling
@@ -733,7 +734,8 @@ public void instantiateWorkflowTemplate(
      * 
      * Instantiates a template and begins execution.
      * This method is equivalent to executing the sequence
-     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate],
+     * [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
      * [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
      * The returned Operation can be used to track execution of
      * workflow by polling
@@ -896,7 +898,8 @@ public com.google.longrunning.Operation instantiateWorkflowTemplate(
      * 
      * Instantiates a template and begins execution.
      * This method is equivalent to executing the sequence
-     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate],
+     * [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
      * [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
      * The returned Operation can be used to track execution of
      * workflow by polling
@@ -1048,7 +1051,8 @@ protected WorkflowTemplateServiceFutureStub build(
      * 
      * Instantiates a template and begins execution.
      * This method is equivalent to executing the sequence
-     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate],
+     * [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
      * [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
      * The returned Operation can be used to track execution of
      * workflow by polling
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java
index 4e903d22..88051058 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java
@@ -259,6 +259,38 @@ private ClusterConfig(
                 endpointConfig_ = subBuilder.buildPartial();
               }
 
+              break;
+            }
+          case 162:
+            {
+              com.google.cloud.dataproc.v1.MetastoreConfig.Builder subBuilder = null;
+              if (metastoreConfig_ != null) {
+                subBuilder = metastoreConfig_.toBuilder();
+              }
+              metastoreConfig_ =
+                  input.readMessage(
+                      com.google.cloud.dataproc.v1.MetastoreConfig.parser(), extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(metastoreConfig_);
+                metastoreConfig_ = subBuilder.buildPartial();
+              }
+
+              break;
+            }
+          case 170:
+            {
+              com.google.cloud.dataproc.v1.GkeClusterConfig.Builder subBuilder = null;
+              if (gkeClusterConfig_ != null) {
+                subBuilder = gkeClusterConfig_.toBuilder();
+              }
+              gkeClusterConfig_ =
+                  input.readMessage(
+                      com.google.cloud.dataproc.v1.GkeClusterConfig.parser(), extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(gkeClusterConfig_);
+                gkeClusterConfig_ = subBuilder.buildPartial();
+              }
+
               break;
             }
           default:
@@ -313,6 +345,8 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    * and manage this project-level, per-location bucket (see
    * [Dataproc staging
    * bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+   * **This field requires a Cloud Storage bucket name, not a URI to a Cloud
+   * Storage bucket.**
    * 
* * string config_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -344,6 +378,8 @@ public java.lang.String getConfigBucket() { * and manage this project-level, per-location bucket (see * [Dataproc staging * bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** *
* * string config_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -378,6 +414,8 @@ public com.google.protobuf.ByteString getConfigBucketBytes() { * and manage this project-level, per-location bucket. The default bucket has * a TTL of 90 days, but you can use any TTL (or none) if you specify a * bucket. + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** *
* * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -409,6 +447,8 @@ public java.lang.String getTempBucket() { * and manage this project-level, per-location bucket. The default bucket has * a TTL of 90 days, but you can use any TTL (or none) if you specify a * bucket. + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** *
* * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -1121,6 +1161,123 @@ public com.google.cloud.dataproc.v1.EndpointConfigOrBuilder getEndpointConfigOrB return getEndpointConfig(); } + public static final int METASTORE_CONFIG_FIELD_NUMBER = 20; + private com.google.cloud.dataproc.v1.MetastoreConfig metastoreConfig_; + /** + * + * + *
+   * Optional. Metastore configuration.
+   * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the metastoreConfig field is set. + */ + @java.lang.Override + public boolean hasMetastoreConfig() { + return metastoreConfig_ != null; + } + /** + * + * + *
+   * Optional. Metastore configuration.
+   * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The metastoreConfig. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.MetastoreConfig getMetastoreConfig() { + return metastoreConfig_ == null + ? com.google.cloud.dataproc.v1.MetastoreConfig.getDefaultInstance() + : metastoreConfig_; + } + /** + * + * + *
+   * Optional. Metastore configuration.
+   * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.MetastoreConfigOrBuilder getMetastoreConfigOrBuilder() { + return getMetastoreConfig(); + } + + public static final int GKE_CLUSTER_CONFIG_FIELD_NUMBER = 21; + private com.google.cloud.dataproc.v1.GkeClusterConfig gkeClusterConfig_; + /** + * + * + *
+   * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+   * Kubernetes. Setting this is considered mutually exclusive with Compute
+   * Engine-based options such as `gce_cluster_config`, `master_config`,
+   * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the gkeClusterConfig field is set. + */ + @java.lang.Override + public boolean hasGkeClusterConfig() { + return gkeClusterConfig_ != null; + } + /** + * + * + *
+   * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+   * Kubernetes. Setting this is considered mutually exclusive with Compute
+   * Engine-based options such as `gce_cluster_config`, `master_config`,
+   * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The gkeClusterConfig. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.GkeClusterConfig getGkeClusterConfig() { + return gkeClusterConfig_ == null + ? com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance() + : gkeClusterConfig_; + } + /** + * + * + *
+   * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+   * Kubernetes. Setting this is considered mutually exclusive with Compute
+   * Engine-based options such as `gce_cluster_config`, `master_config`,
+   * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder getGkeClusterConfigOrBuilder() { + return getGkeClusterConfig(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -1174,6 +1331,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (endpointConfig_ != null) { output.writeMessage(19, getEndpointConfig()); } + if (metastoreConfig_ != null) { + output.writeMessage(20, getMetastoreConfig()); + } + if (gkeClusterConfig_ != null) { + output.writeMessage(21, getGkeClusterConfig()); + } unknownFields.writeTo(output); } @@ -1225,6 +1388,12 @@ public int getSerializedSize() { if (endpointConfig_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(19, getEndpointConfig()); } + if (metastoreConfig_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(20, getMetastoreConfig()); + } + if (gkeClusterConfig_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(21, getGkeClusterConfig()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -1284,6 +1453,14 @@ public boolean equals(final java.lang.Object obj) { if (hasEndpointConfig()) { if (!getEndpointConfig().equals(other.getEndpointConfig())) return false; } + if (hasMetastoreConfig() != other.hasMetastoreConfig()) return false; + if (hasMetastoreConfig()) { + if (!getMetastoreConfig().equals(other.getMetastoreConfig())) return false; + } + if (hasGkeClusterConfig() != other.hasGkeClusterConfig()) return false; + if (hasGkeClusterConfig()) { + if (!getGkeClusterConfig().equals(other.getGkeClusterConfig())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -1343,6 +1520,14 @@ public int hashCode() { hash = (37 * hash) + ENDPOINT_CONFIG_FIELD_NUMBER; hash = 
(53 * hash) + getEndpointConfig().hashCode(); } + if (hasMetastoreConfig()) { + hash = (37 * hash) + METASTORE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getMetastoreConfig().hashCode(); + } + if (hasGkeClusterConfig()) { + hash = (37 * hash) + GKE_CLUSTER_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getGkeClusterConfig().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -1560,6 +1745,18 @@ public Builder clear() { endpointConfig_ = null; endpointConfigBuilder_ = null; } + if (metastoreConfigBuilder_ == null) { + metastoreConfig_ = null; + } else { + metastoreConfig_ = null; + metastoreConfigBuilder_ = null; + } + if (gkeClusterConfigBuilder_ == null) { + gkeClusterConfig_ = null; + } else { + gkeClusterConfig_ = null; + gkeClusterConfigBuilder_ = null; + } return this; } @@ -1649,6 +1846,16 @@ public com.google.cloud.dataproc.v1.ClusterConfig buildPartial() { } else { result.endpointConfig_ = endpointConfigBuilder_.build(); } + if (metastoreConfigBuilder_ == null) { + result.metastoreConfig_ = metastoreConfig_; + } else { + result.metastoreConfig_ = metastoreConfigBuilder_.build(); + } + if (gkeClusterConfigBuilder_ == null) { + result.gkeClusterConfig_ = gkeClusterConfig_; + } else { + result.gkeClusterConfig_ = gkeClusterConfigBuilder_.build(); + } onBuilt(); return result; } @@ -1763,6 +1970,12 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.ClusterConfig other) { if (other.hasEndpointConfig()) { mergeEndpointConfig(other.getEndpointConfig()); } + if (other.hasMetastoreConfig()) { + mergeMetastoreConfig(other.getMetastoreConfig()); + } + if (other.hasGkeClusterConfig()) { + mergeGkeClusterConfig(other.getGkeClusterConfig()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1808,6 +2021,8 @@ public Builder mergeFrom( * and manage this project-level, per-location bucket (see * [Dataproc staging * 
bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string config_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -1838,6 +2053,8 @@ public java.lang.String getConfigBucket() { * and manage this project-level, per-location bucket (see * [Dataproc staging * bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string config_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -1868,6 +2085,8 @@ public com.google.protobuf.ByteString getConfigBucketBytes() { * and manage this project-level, per-location bucket (see * [Dataproc staging * bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string config_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -1897,6 +2116,8 @@ public Builder setConfigBucket(java.lang.String value) { * and manage this project-level, per-location bucket (see * [Dataproc staging * bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string config_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -1922,6 +2143,8 @@ public Builder clearConfigBucket() { * and manage this project-level, per-location bucket (see * [Dataproc staging * bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). 
+ * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string config_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -1954,6 +2177,8 @@ public Builder setConfigBucketBytes(com.google.protobuf.ByteString value) { * and manage this project-level, per-location bucket. The default bucket has * a TTL of 90 days, but you can use any TTL (or none) if you specify a * bucket. + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -1984,6 +2209,8 @@ public java.lang.String getTempBucket() { * and manage this project-level, per-location bucket. The default bucket has * a TTL of 90 days, but you can use any TTL (or none) if you specify a * bucket. + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -2014,6 +2241,8 @@ public com.google.protobuf.ByteString getTempBucketBytes() { * and manage this project-level, per-location bucket. The default bucket has * a TTL of 90 days, but you can use any TTL (or none) if you specify a * bucket. + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -2043,6 +2272,8 @@ public Builder setTempBucket(java.lang.String value) { * and manage this project-level, per-location bucket. The default bucket has * a TTL of 90 days, but you can use any TTL (or none) if you specify a * bucket. + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -2068,6 +2299,8 @@ public Builder clearTempBucket() { * and manage this project-level, per-location bucket. 
The default bucket has * a TTL of 90 days, but you can use any TTL (or none) if you specify a * bucket. + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -4775,6 +5008,441 @@ public com.google.cloud.dataproc.v1.EndpointConfigOrBuilder getEndpointConfigOrB return endpointConfigBuilder_; } + private com.google.cloud.dataproc.v1.MetastoreConfig metastoreConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.MetastoreConfig, + com.google.cloud.dataproc.v1.MetastoreConfig.Builder, + com.google.cloud.dataproc.v1.MetastoreConfigOrBuilder> + metastoreConfigBuilder_; + /** + * + * + *
+     * Optional. Metastore configuration.
+     * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the metastoreConfig field is set. + */ + public boolean hasMetastoreConfig() { + return metastoreConfigBuilder_ != null || metastoreConfig_ != null; + } + /** + * + * + *
+     * Optional. Metastore configuration.
+     * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The metastoreConfig. + */ + public com.google.cloud.dataproc.v1.MetastoreConfig getMetastoreConfig() { + if (metastoreConfigBuilder_ == null) { + return metastoreConfig_ == null + ? com.google.cloud.dataproc.v1.MetastoreConfig.getDefaultInstance() + : metastoreConfig_; + } else { + return metastoreConfigBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. Metastore configuration.
+     * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setMetastoreConfig(com.google.cloud.dataproc.v1.MetastoreConfig value) { + if (metastoreConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + metastoreConfig_ = value; + onChanged(); + } else { + metastoreConfigBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Metastore configuration.
+     * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setMetastoreConfig( + com.google.cloud.dataproc.v1.MetastoreConfig.Builder builderForValue) { + if (metastoreConfigBuilder_ == null) { + metastoreConfig_ = builderForValue.build(); + onChanged(); + } else { + metastoreConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. Metastore configuration.
+     * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeMetastoreConfig(com.google.cloud.dataproc.v1.MetastoreConfig value) { + if (metastoreConfigBuilder_ == null) { + if (metastoreConfig_ != null) { + metastoreConfig_ = + com.google.cloud.dataproc.v1.MetastoreConfig.newBuilder(metastoreConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + metastoreConfig_ = value; + } + onChanged(); + } else { + metastoreConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Metastore configuration.
+     * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearMetastoreConfig() { + if (metastoreConfigBuilder_ == null) { + metastoreConfig_ = null; + onChanged(); + } else { + metastoreConfig_ = null; + metastoreConfigBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. Metastore configuration.
+     * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.MetastoreConfig.Builder getMetastoreConfigBuilder() { + + onChanged(); + return getMetastoreConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Metastore configuration.
+     * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.MetastoreConfigOrBuilder getMetastoreConfigOrBuilder() { + if (metastoreConfigBuilder_ != null) { + return metastoreConfigBuilder_.getMessageOrBuilder(); + } else { + return metastoreConfig_ == null + ? com.google.cloud.dataproc.v1.MetastoreConfig.getDefaultInstance() + : metastoreConfig_; + } + } + /** + * + * + *
+     * Optional. Metastore configuration.
+     * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.MetastoreConfig, + com.google.cloud.dataproc.v1.MetastoreConfig.Builder, + com.google.cloud.dataproc.v1.MetastoreConfigOrBuilder> + getMetastoreConfigFieldBuilder() { + if (metastoreConfigBuilder_ == null) { + metastoreConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.MetastoreConfig, + com.google.cloud.dataproc.v1.MetastoreConfig.Builder, + com.google.cloud.dataproc.v1.MetastoreConfigOrBuilder>( + getMetastoreConfig(), getParentForChildren(), isClean()); + metastoreConfig_ = null; + } + return metastoreConfigBuilder_; + } + + private com.google.cloud.dataproc.v1.GkeClusterConfig gkeClusterConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.GkeClusterConfig, + com.google.cloud.dataproc.v1.GkeClusterConfig.Builder, + com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder> + gkeClusterConfigBuilder_; + /** + * + * + *
+     * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+     * Kubernetes. Setting this is considered mutually exclusive with Compute
+     * Engine-based options such as `gce_cluster_config`, `master_config`,
+     * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the gkeClusterConfig field is set. + */ + public boolean hasGkeClusterConfig() { + return gkeClusterConfigBuilder_ != null || gkeClusterConfig_ != null; + } + /** + * + * + *
+     * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+     * Kubernetes. Setting this is considered mutually exclusive with Compute
+     * Engine-based options such as `gce_cluster_config`, `master_config`,
+     * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The gkeClusterConfig. + */ + public com.google.cloud.dataproc.v1.GkeClusterConfig getGkeClusterConfig() { + if (gkeClusterConfigBuilder_ == null) { + return gkeClusterConfig_ == null + ? com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance() + : gkeClusterConfig_; + } else { + return gkeClusterConfigBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+     * Kubernetes. Setting this is considered mutually exclusive with Compute
+     * Engine-based options such as `gce_cluster_config`, `master_config`,
+     * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setGkeClusterConfig(com.google.cloud.dataproc.v1.GkeClusterConfig value) { + if (gkeClusterConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + gkeClusterConfig_ = value; + onChanged(); + } else { + gkeClusterConfigBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+     * Kubernetes. Setting this is considered mutually exclusive with Compute
+     * Engine-based options such as `gce_cluster_config`, `master_config`,
+     * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setGkeClusterConfig( + com.google.cloud.dataproc.v1.GkeClusterConfig.Builder builderForValue) { + if (gkeClusterConfigBuilder_ == null) { + gkeClusterConfig_ = builderForValue.build(); + onChanged(); + } else { + gkeClusterConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+     * Kubernetes. Setting this is considered mutually exclusive with Compute
+     * Engine-based options such as `gce_cluster_config`, `master_config`,
+     * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeGkeClusterConfig(com.google.cloud.dataproc.v1.GkeClusterConfig value) { + if (gkeClusterConfigBuilder_ == null) { + if (gkeClusterConfig_ != null) { + gkeClusterConfig_ = + com.google.cloud.dataproc.v1.GkeClusterConfig.newBuilder(gkeClusterConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + gkeClusterConfig_ = value; + } + onChanged(); + } else { + gkeClusterConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+     * Kubernetes. Setting this is considered mutually exclusive with Compute
+     * Engine-based options such as `gce_cluster_config`, `master_config`,
+     * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearGkeClusterConfig() { + if (gkeClusterConfigBuilder_ == null) { + gkeClusterConfig_ = null; + onChanged(); + } else { + gkeClusterConfig_ = null; + gkeClusterConfigBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+     * Kubernetes. Setting this is considered mutually exclusive with Compute
+     * Engine-based options such as `gce_cluster_config`, `master_config`,
+     * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.GkeClusterConfig.Builder getGkeClusterConfigBuilder() { + + onChanged(); + return getGkeClusterConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+     * Kubernetes. Setting this is considered mutually exclusive with Compute
+     * Engine-based options such as `gce_cluster_config`, `master_config`,
+     * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder getGkeClusterConfigOrBuilder() { + if (gkeClusterConfigBuilder_ != null) { + return gkeClusterConfigBuilder_.getMessageOrBuilder(); + } else { + return gkeClusterConfig_ == null + ? com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance() + : gkeClusterConfig_; + } + } + /** + * + * + *
+     * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+     * Kubernetes. Setting this is considered mutually exclusive with Compute
+     * Engine-based options such as `gce_cluster_config`, `master_config`,
+     * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.GkeClusterConfig, + com.google.cloud.dataproc.v1.GkeClusterConfig.Builder, + com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder> + getGkeClusterConfigFieldBuilder() { + if (gkeClusterConfigBuilder_ == null) { + gkeClusterConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.GkeClusterConfig, + com.google.cloud.dataproc.v1.GkeClusterConfig.Builder, + com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder>( + getGkeClusterConfig(), getParentForChildren(), isClean()); + gkeClusterConfig_ = null; + } + return gkeClusterConfigBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java index faec0811..0c6667e5 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java @@ -36,6 +36,8 @@ public interface ClusterConfigOrBuilder * and manage this project-level, per-location bucket (see * [Dataproc staging * bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). 
+ * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string config_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -56,6 +58,8 @@ public interface ClusterConfigOrBuilder * and manage this project-level, per-location bucket (see * [Dataproc staging * bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string config_bucket = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -77,6 +81,8 @@ public interface ClusterConfigOrBuilder * and manage this project-level, per-location bucket. The default bucket has * a TTL of 90 days, but you can use any TTL (or none) if you specify a * bucket. + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -97,6 +103,8 @@ public interface ClusterConfigOrBuilder * and manage this project-level, per-location bucket. The default bucket has * a TTL of 90 days, but you can use any TTL (or none) if you specify a * bucket. + * **This field requires a Cloud Storage bucket name, not a URI to a Cloud + * Storage bucket.** * * * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -648,4 +656,95 @@ com.google.cloud.dataproc.v1.NodeInitializationActionOrBuilder getInitialization * */ com.google.cloud.dataproc.v1.EndpointConfigOrBuilder getEndpointConfigOrBuilder(); + + /** + * + * + *
+   * Optional. Metastore configuration.
+   * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the metastoreConfig field is set. + */ + boolean hasMetastoreConfig(); + /** + * + * + *
+   * Optional. Metastore configuration.
+   * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The metastoreConfig. + */ + com.google.cloud.dataproc.v1.MetastoreConfig getMetastoreConfig(); + /** + * + * + *
+   * Optional. Metastore configuration.
+   * 
+ * + * + * .google.cloud.dataproc.v1.MetastoreConfig metastore_config = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.MetastoreConfigOrBuilder getMetastoreConfigOrBuilder(); + + /** + * + * + *
+   * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+   * Kubernetes. Setting this is considered mutually exclusive with Compute
+   * Engine-based options such as `gce_cluster_config`, `master_config`,
+   * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the gkeClusterConfig field is set. + */ + boolean hasGkeClusterConfig(); + /** + * + * + *
+   * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+   * Kubernetes. Setting this is considered mutually exclusive with Compute
+   * Engine-based options such as `gce_cluster_config`, `master_config`,
+   * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The gkeClusterConfig. + */ + com.google.cloud.dataproc.v1.GkeClusterConfig getGkeClusterConfig(); + /** + * + * + *
+   * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
+   * Kubernetes. Setting this is considered mutually exclusive with Compute
+   * Engine-based options such as `gce_cluster_config`, `master_config`,
+   * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder getGkeClusterConfigOrBuilder(); } diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterName.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterName.java new file mode 100644 index 00000000..322bc06f --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterName.java @@ -0,0 +1,223 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.dataproc.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class ClusterName implements ResourceName { + private static final PathTemplate PROJECT_LOCATION_CLUSTER = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/locations/{location}/clusters/{cluster}"); + private volatile Map fieldValuesMap; + private final String project; + private final String location; + private final String cluster; + + @Deprecated + protected ClusterName() { + project = null; + location = null; + cluster = null; + } + + private ClusterName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + location = Preconditions.checkNotNull(builder.getLocation()); + cluster = Preconditions.checkNotNull(builder.getCluster()); + } + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getCluster() { + return cluster; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static ClusterName of(String project, String location, String cluster) { + return newBuilder().setProject(project).setLocation(location).setCluster(cluster).build(); + } + + public static String format(String project, String location, String cluster) { + return newBuilder() + .setProject(project) + .setLocation(location) + .setCluster(cluster) + .build() + .toString(); + } + + public static ClusterName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_LOCATION_CLUSTER.validatedMatch( + formattedString, "ClusterName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("location"), matchMap.get("cluster")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + 
public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (ClusterName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_LOCATION_CLUSTER.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (location != null) { + fieldMapBuilder.put("location", location); + } + if (cluster != null) { + fieldMapBuilder.put("cluster", cluster); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_LOCATION_CLUSTER.instantiate( + "project", project, "location", location, "cluster", cluster); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null || getClass() == o.getClass()) { + ClusterName that = ((ClusterName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.location, that.location) + && Objects.equals(this.cluster, that.cluster); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(location); + h *= 1000003; + h ^= Objects.hashCode(cluster); + return h; + } + + /** Builder for projects/{project}/locations/{location}/clusters/{cluster}. 
*/ + public static class Builder { + private String project; + private String location; + private String cluster; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getCluster() { + return cluster; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setLocation(String location) { + this.location = location; + return this; + } + + public Builder setCluster(String cluster) { + this.cluster = cluster; + return this; + } + + private Builder(ClusterName clusterName) { + project = clusterName.project; + location = clusterName.location; + cluster = clusterName.cluster; + } + + public ClusterName build() { + return new ClusterName(this); + } + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatus.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatus.java index f3906534..a32f4d58 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatus.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatus.java @@ -212,6 +212,36 @@ public enum State implements com.google.protobuf.ProtocolMessageEnum { * UPDATING = 5; */ UPDATING(5), + /** + * + * + *
+     * The cluster is being stopped. It cannot be used.
+     * 
+ * + * STOPPING = 6; + */ + STOPPING(6), + /** + * + * + *
+     * The cluster is currently stopped. It is not ready for use.
+     * 
+ * + * STOPPED = 7; + */ + STOPPED(7), + /** + * + * + *
+     * The cluster is being started. It is not ready for use.
+     * 
+ * + * STARTING = 8; + */ + STARTING(8), UNRECOGNIZED(-1), ; @@ -275,6 +305,36 @@ public enum State implements com.google.protobuf.ProtocolMessageEnum { * UPDATING = 5; */ public static final int UPDATING_VALUE = 5; + /** + * + * + *
+     * The cluster is being stopped. It cannot be used.
+     * 
+ * + * STOPPING = 6; + */ + public static final int STOPPING_VALUE = 6; + /** + * + * + *
+     * The cluster is currently stopped. It is not ready for use.
+     * 
+ * + * STOPPED = 7; + */ + public static final int STOPPED_VALUE = 7; + /** + * + * + *
+     * The cluster is being started. It is not ready for use.
+     * 
+ * + * STARTING = 8; + */ + public static final int STARTING_VALUE = 8; public final int getNumber() { if (this == UNRECOGNIZED) { @@ -312,6 +372,12 @@ public static State forNumber(int value) { return DELETING; case 5: return UPDATING; + case 6: + return STOPPING; + case 7: + return STOPPED; + case 8: + return STARTING; default: return null; } diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java index b4e41d27..db02a2ca 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java @@ -39,6 +39,14 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_dataproc_v1_ClusterConfig_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_dataproc_v1_ClusterConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_GkeClusterConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_cloud_dataproc_v1_EndpointConfig_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -63,6 +71,14 @@ public static void 
registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_dataproc_v1_GceClusterConfig_MetadataEntry_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_dataproc_v1_GceClusterConfig_MetadataEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_NodeGroupAffinity_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_NodeGroupAffinity_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_ShieldedInstanceConfig_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_ShieldedInstanceConfig_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_cloud_dataproc_v1_InstanceGroupConfig_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -95,6 +111,14 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_dataproc_v1_KerberosConfig_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_dataproc_v1_KerberosConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_IdentityConfig_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_IdentityConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_IdentityConfig_UserServiceAccountMappingEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_cloud_dataproc_v1_IdentityConfig_UserServiceAccountMappingEntry_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_cloud_dataproc_v1_SoftwareConfig_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -107,6 +131,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_dataproc_v1_LifecycleConfig_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_dataproc_v1_LifecycleConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_MetastoreConfig_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_MetastoreConfig_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_cloud_dataproc_v1_ClusterMetrics_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -127,6 +155,14 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_StopClusterRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_StopClusterRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_StartClusterRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_cloud_dataproc_v1_StartClusterRequest_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_cloud_dataproc_v1_DeleteClusterRequest_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -167,212 +203,273 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "\n\'google/cloud/dataproc/v1/clusters.prot" + "o\022\030google.cloud.dataproc.v1\032\034google/api/" + "annotations.proto\032\027google/api/client.pro" - + "to\032\037google/api/field_behavior.proto\032%goo" - + "gle/cloud/dataproc/v1/shared.proto\032#goog" - + "le/longrunning/operations.proto\032\036google/" - + "protobuf/duration.proto\032 google/protobuf" - + "/field_mask.proto\032\037google/protobuf/times" - + "tamp.proto\"\315\003\n\007Cluster\022\027\n\nproject_id\030\001 \001" - + "(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\022<\n\006co" - + "nfig\030\003 \001(\0132\'.google.cloud.dataproc.v1.Cl" - + "usterConfigB\003\340A\002\022B\n\006labels\030\010 \003(\0132-.googl" - + "e.cloud.dataproc.v1.Cluster.LabelsEntryB" - + "\003\340A\001\022<\n\006status\030\004 \001(\0132\'.google.cloud.data" - + "proc.v1.ClusterStatusB\003\340A\003\022D\n\016status_his" - + "tory\030\007 \003(\0132\'.google.cloud.dataproc.v1.Cl" - + "usterStatusB\003\340A\003\022\031\n\014cluster_uuid\030\006 \001(\tB\003" - + "\340A\003\022>\n\007metrics\030\t \001(\0132(.google.cloud.data" - + "proc.v1.ClusterMetricsB\003\340A\003\032-\n\013LabelsEnt" - + "ry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\222\007\n\rC" - + "lusterConfig\022\032\n\rconfig_bucket\030\001 \001(\tB\003\340A\001" - + "\022\030\n\013temp_bucket\030\002 \001(\tB\003\340A\001\022K\n\022gce_cluste" - + "r_config\030\010 \001(\0132*.google.cloud.dataproc.v" - + "1.GceClusterConfigB\003\340A\001\022I\n\rmaster_config" - + "\030\t \001(\0132-.google.cloud.dataproc.v1.Instan" - + 
"ceGroupConfigB\003\340A\001\022I\n\rworker_config\030\n \001(" - + "\0132-.google.cloud.dataproc.v1.InstanceGro" - + "upConfigB\003\340A\001\022S\n\027secondary_worker_config" - + "\030\014 \001(\0132-.google.cloud.dataproc.v1.Instan" - + "ceGroupConfigB\003\340A\001\022F\n\017software_config\030\r " - + "\001(\0132(.google.cloud.dataproc.v1.SoftwareC" - + "onfigB\003\340A\001\022W\n\026initialization_actions\030\013 \003" - + "(\01322.google.cloud.dataproc.v1.NodeInitia" - + "lizationActionB\003\340A\001\022J\n\021encryption_config" - + "\030\017 \001(\0132*.google.cloud.dataproc.v1.Encryp" - + "tionConfigB\003\340A\001\022L\n\022autoscaling_config\030\022 " - + "\001(\0132+.google.cloud.dataproc.v1.Autoscali" - + "ngConfigB\003\340A\001\022F\n\017security_config\030\020 \001(\0132(" - + ".google.cloud.dataproc.v1.SecurityConfig" - + "B\003\340A\001\022H\n\020lifecycle_config\030\021 \001(\0132).google" - + ".cloud.dataproc.v1.LifecycleConfigB\003\340A\001\022" - + "F\n\017endpoint_config\030\023 \001(\0132(.google.cloud." - + "dataproc.v1.EndpointConfigB\003\340A\001\"\272\001\n\016Endp" - + "ointConfig\022P\n\nhttp_ports\030\001 \003(\01327.google." 
- + "cloud.dataproc.v1.EndpointConfig.HttpPor" - + "tsEntryB\003\340A\003\022$\n\027enable_http_port_access\030" - + "\002 \001(\010B\003\340A\001\0320\n\016HttpPortsEntry\022\013\n\003key\030\001 \001(" - + "\t\022\r\n\005value\030\002 \001(\t:\0028\001\",\n\021AutoscalingConfi" - + "g\022\027\n\npolicy_uri\030\001 \001(\tB\003\340A\001\"4\n\020Encryption" - + "Config\022 \n\023gce_pd_kms_key_name\030\001 \001(\tB\003\340A\001" - + "\"\237\003\n\020GceClusterConfig\022\025\n\010zone_uri\030\001 \001(\tB" - + "\003\340A\001\022\030\n\013network_uri\030\002 \001(\tB\003\340A\001\022\033\n\016subnet" - + "work_uri\030\006 \001(\tB\003\340A\001\022\035\n\020internal_ip_only\030" - + "\007 \001(\010B\003\340A\001\022\034\n\017service_account\030\010 \001(\tB\003\340A\001" - + "\022#\n\026service_account_scopes\030\003 \003(\tB\003\340A\001\022\014\n" - + "\004tags\030\004 \003(\t\022J\n\010metadata\030\005 \003(\01328.google.c" - + "loud.dataproc.v1.GceClusterConfig.Metada" - + "taEntry\022P\n\024reservation_affinity\030\013 \001(\0132-." - + "google.cloud.dataproc.v1.ReservationAffi" - + "nityB\003\340A\001\032/\n\rMetadataEntry\022\013\n\003key\030\001 \001(\t\022" - + "\r\n\005value\030\002 \001(\t:\0028\001\"\315\004\n\023InstanceGroupConf" - + "ig\022\032\n\rnum_instances\030\001 \001(\005B\003\340A\001\022\033\n\016instan" - + "ce_names\030\002 \003(\tB\003\340A\003\022\026\n\timage_uri\030\003 \001(\tB\003" - + "\340A\001\022\035\n\020machine_type_uri\030\004 \001(\tB\003\340A\001\022>\n\013di" - + "sk_config\030\005 \001(\0132$.google.cloud.dataproc." 
- + "v1.DiskConfigB\003\340A\001\022\033\n\016is_preemptible\030\006 \001" - + "(\010B\003\340A\003\022Y\n\016preemptibility\030\n \001(\0162<.google" - + ".cloud.dataproc.v1.InstanceGroupConfig.P" - + "reemptibilityB\003\340A\001\022O\n\024managed_group_conf" - + "ig\030\007 \001(\0132,.google.cloud.dataproc.v1.Mana" - + "gedGroupConfigB\003\340A\003\022F\n\014accelerators\030\010 \003(" - + "\0132+.google.cloud.dataproc.v1.Accelerator" - + "ConfigB\003\340A\001\022\035\n\020min_cpu_platform\030\t \001(\tB\003\340" - + "A\001\"V\n\016Preemptibility\022\036\n\032PREEMPTIBILITY_U" - + "NSPECIFIED\020\000\022\023\n\017NON_PREEMPTIBLE\020\001\022\017\n\013PRE" - + "EMPTIBLE\020\002\"c\n\022ManagedGroupConfig\022#\n\026inst" - + "ance_template_name\030\001 \001(\tB\003\340A\003\022(\n\033instanc" - + "e_group_manager_name\030\002 \001(\tB\003\340A\003\"L\n\021Accel" - + "eratorConfig\022\034\n\024accelerator_type_uri\030\001 \001" - + "(\t\022\031\n\021accelerator_count\030\002 \001(\005\"f\n\nDiskCon" - + "fig\022\033\n\016boot_disk_type\030\003 \001(\tB\003\340A\001\022\036\n\021boot" - + "_disk_size_gb\030\001 \001(\005B\003\340A\001\022\033\n\016num_local_ss" - + "ds\030\002 \001(\005B\003\340A\001\"s\n\030NodeInitializationActio" - + "n\022\034\n\017executable_file\030\001 \001(\tB\003\340A\002\0229\n\021execu" - + "tion_timeout\030\002 \001(\0132\031.google.protobuf.Dur" - + "ationB\003\340A\001\"\204\003\n\rClusterStatus\022A\n\005state\030\001 " - + "\001(\0162-.google.cloud.dataproc.v1.ClusterSt" - + "atus.StateB\003\340A\003\022\026\n\006detail\030\002 \001(\tB\006\340A\003\340A\001\022" - + "9\n\020state_start_time\030\003 \001(\0132\032.google.proto" - + "buf.TimestampB\003\340A\003\022G\n\010substate\030\004 \001(\01620.g" - + "oogle.cloud.dataproc.v1.ClusterStatus.Su" - + "bstateB\003\340A\003\"V\n\005State\022\013\n\007UNKNOWN\020\000\022\014\n\010CRE" - + "ATING\020\001\022\013\n\007RUNNING\020\002\022\t\n\005ERROR\020\003\022\014\n\010DELET" - + 
"ING\020\004\022\014\n\010UPDATING\020\005\"<\n\010Substate\022\017\n\013UNSPE" - + "CIFIED\020\000\022\r\n\tUNHEALTHY\020\001\022\020\n\014STALE_STATUS\020" - + "\002\"S\n\016SecurityConfig\022A\n\017kerberos_config\030\001" - + " \001(\0132(.google.cloud.dataproc.v1.Kerberos" - + "Config\"\220\004\n\016KerberosConfig\022\034\n\017enable_kerb" - + "eros\030\001 \001(\010B\003\340A\001\022(\n\033root_principal_passwo" - + "rd_uri\030\002 \001(\tB\003\340A\002\022\030\n\013kms_key_uri\030\003 \001(\tB\003" - + "\340A\002\022\031\n\014keystore_uri\030\004 \001(\tB\003\340A\001\022\033\n\016trusts" - + "tore_uri\030\005 \001(\tB\003\340A\001\022\"\n\025keystore_password" - + "_uri\030\006 \001(\tB\003\340A\001\022\035\n\020key_password_uri\030\007 \001(" - + "\tB\003\340A\001\022$\n\027truststore_password_uri\030\010 \001(\tB" - + "\003\340A\001\022$\n\027cross_realm_trust_realm\030\t \001(\tB\003\340" - + "A\001\022\"\n\025cross_realm_trust_kdc\030\n \001(\tB\003\340A\001\022+" - + "\n\036cross_realm_trust_admin_server\030\013 \001(\tB\003" - + "\340A\001\0222\n%cross_realm_trust_shared_password" - + "_uri\030\014 \001(\tB\003\340A\001\022\033\n\016kdc_db_key_uri\030\r \001(\tB" - + "\003\340A\001\022\037\n\022tgt_lifetime_hours\030\016 \001(\005B\003\340A\001\022\022\n" - + "\005realm\030\017 \001(\tB\003\340A\001\"\371\001\n\016SoftwareConfig\022\032\n\r" - + "image_version\030\001 \001(\tB\003\340A\001\022Q\n\nproperties\030\002" - + " \003(\01328.google.cloud.dataproc.v1.Software" - + "Config.PropertiesEntryB\003\340A\001\022E\n\023optional_" - + "components\030\003 \003(\0162#.google.cloud.dataproc" - + ".v1.ComponentB\003\340A\001\0321\n\017PropertiesEntry\022\013\n" - + "\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\203\002\n\017Lifecy" - + "cleConfig\0227\n\017idle_delete_ttl\030\001 \001(\0132\031.goo" - + "gle.protobuf.DurationB\003\340A\001\022;\n\020auto_delet" - + "e_time\030\002 \001(\0132\032.google.protobuf.Timestamp" - + 
"B\003\340A\001H\000\0229\n\017auto_delete_ttl\030\003 \001(\0132\031.googl" - + "e.protobuf.DurationB\003\340A\001H\000\0228\n\017idle_start" - + "_time\030\004 \001(\0132\032.google.protobuf.TimestampB" - + "\003\340A\003B\005\n\003ttl\"\232\002\n\016ClusterMetrics\022O\n\014hdfs_m" - + "etrics\030\001 \003(\01329.google.cloud.dataproc.v1." - + "ClusterMetrics.HdfsMetricsEntry\022O\n\014yarn_" - + "metrics\030\002 \003(\01329.google.cloud.dataproc.v1" - + ".ClusterMetrics.YarnMetricsEntry\0322\n\020Hdfs" - + "MetricsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\003" - + ":\0028\001\0322\n\020YarnMetricsEntry\022\013\n\003key\030\001 \001(\t\022\r\n" - + "\005value\030\002 \001(\003:\0028\001\"\226\001\n\024CreateClusterReques" - + "t\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001" - + "(\tB\003\340A\002\0227\n\007cluster\030\002 \001(\0132!.google.cloud." - + "dataproc.v1.ClusterB\003\340A\002\022\027\n\nrequest_id\030\004" - + " \001(\tB\003\340A\001\"\256\002\n\024UpdateClusterRequest\022\027\n\npr" - + "oject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\005 \001(\tB\003\340A\002" - + "\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\0227\n\007cluster\030\003" - + " \001(\0132!.google.cloud.dataproc.v1.ClusterB" - + "\003\340A\002\022E\n\035graceful_decommission_timeout\030\006 " - + "\001(\0132\031.google.protobuf.DurationB\003\340A\001\0224\n\013u" - + "pdate_mask\030\004 \001(\0132\032.google.protobuf.Field" - + "MaskB\003\340A\002\022\027\n\nrequest_id\030\007 \001(\tB\003\340A\001\"\223\001\n\024D" - + "eleteClusterRequest\022\027\n\nproject_id\030\001 \001(\tB" - + "\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_nam" - + "e\030\002 \001(\tB\003\340A\002\022\031\n\014cluster_uuid\030\004 \001(\tB\003\340A\001\022" - + "\027\n\nrequest_id\030\005 \001(\tB\003\340A\001\"\\\n\021GetClusterRe" - + "quest\022\027\n\nproject_id\030\001 
\001(\tB\003\340A\002\022\023\n\006region" - + "\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\"\211" - + "\001\n\023ListClustersRequest\022\027\n\nproject_id\030\001 \001" - + "(\tB\003\340A\002\022\023\n\006region\030\004 \001(\tB\003\340A\002\022\023\n\006filter\030\005" - + " \001(\tB\003\340A\001\022\026\n\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\npag" - + "e_token\030\003 \001(\tB\003\340A\001\"n\n\024ListClustersRespon" - + "se\0228\n\010clusters\030\001 \003(\0132!.google.cloud.data" - + "proc.v1.ClusterB\003\340A\003\022\034\n\017next_page_token\030" - + "\002 \001(\tB\003\340A\003\"a\n\026DiagnoseClusterRequest\022\027\n\n" - + "project_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340" - + "A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\"1\n\026Diagnos" - + "eClusterResults\022\027\n\noutput_uri\030\001 \001(\tB\003\340A\003" - + "\"\370\001\n\023ReservationAffinity\022Y\n\030consume_rese" - + "rvation_type\030\001 \001(\01622.google.cloud.datapr" - + "oc.v1.ReservationAffinity.TypeB\003\340A\001\022\020\n\003k" - + "ey\030\002 \001(\tB\003\340A\001\022\023\n\006values\030\003 \003(\tB\003\340A\001\"_\n\004Ty" - + "pe\022\024\n\020TYPE_UNSPECIFIED\020\000\022\022\n\016NO_RESERVATI" - + "ON\020\001\022\023\n\017ANY_RESERVATION\020\002\022\030\n\024SPECIFIC_RE" - + "SERVATION\020\0032\377\014\n\021ClusterController\022\200\002\n\rCr" - + "eateCluster\022..google.cloud.dataproc.v1.C" - + "reateClusterRequest\032\035.google.longrunning" - + ".Operation\"\237\001\202\323\344\223\002>\"3/v1/projects/{proje" - + "ct_id}/regions/{region}/clusters:\007cluste" - + "r\332A\031project_id,region,cluster\312A<\n\007Cluste" - + "r\0221google.cloud.dataproc.v1.ClusterOpera" - + "tionMetadata\022\250\002\n\rUpdateCluster\022..google." 
- + "cloud.dataproc.v1.UpdateClusterRequest\032\035" - + ".google.longrunning.Operation\"\307\001\202\323\344\223\002M2B" - + "/v1/projects/{project_id}/regions/{regio" - + "n}/clusters/{cluster_name}:\007cluster\332A2pr" - + "oject_id,region,cluster_name,cluster,upd" - + "ate_mask\312A<\n\007Cluster\0221google.cloud.datap" - + "roc.v1.ClusterOperationMetadata\022\231\002\n\rDele" - + "teCluster\022..google.cloud.dataproc.v1.Del" - + "eteClusterRequest\032\035.google.longrunning.O" - + "peration\"\270\001\202\323\344\223\002D*B/v1/projects/{project" - + "_id}/regions/{region}/clusters/{cluster_" - + "name}\332A\036project_id,region,cluster_name\312A" - + "J\n\025google.protobuf.Empty\0221google.cloud.d" - + "ataproc.v1.ClusterOperationMetadata\022\311\001\n\n" - + "GetCluster\022+.google.cloud.dataproc.v1.Ge" - + "tClusterRequest\032!.google.cloud.dataproc." - + "v1.Cluster\"k\202\323\344\223\002D\022B/v1/projects/{projec" - + "t_id}/regions/{region}/clusters/{cluster" - + "_name}\332A\036project_id,region,cluster_name\022" - + "\331\001\n\014ListClusters\022-.google.cloud.dataproc" - + ".v1.ListClustersRequest\032..google.cloud.d" - + "ataproc.v1.ListClustersResponse\"j\202\323\344\223\0025\022" - + "3/v1/projects/{project_id}/regions/{regi" - + "on}/clusters\332A\021project_id,region\332A\030proje" - + "ct_id,region,filter\022\252\002\n\017DiagnoseCluster\022" - + "0.google.cloud.dataproc.v1.DiagnoseClust" - + "erRequest\032\035.google.longrunning.Operation" - + "\"\305\001\202\323\344\223\002P\"K/v1/projects/{project_id}/reg" - + "ions/{region}/clusters/{cluster_name}:di" - + "agnose:\001*\332A\036project_id,region,cluster_na" - + "me\312AK\n\026DiagnoseClusterResults\0221google.cl" - + "oud.dataproc.v1.ClusterOperationMetadata" - + "\032K\312A\027dataproc.googleapis.com\322A.https://w" - + "ww.googleapis.com/auth/cloud-platformBq\n" - + "\034com.google.cloud.dataproc.v1B\rClustersP" - + "rotoP\001Z@google.golang.org/genproto/googl" - + 
"eapis/cloud/dataproc/v1;dataprocb\006proto3" + + "to\032\037google/api/field_behavior.proto\032\031goo" + + "gle/api/resource.proto\032%google/cloud/dat" + + "aproc/v1/shared.proto\032#google/longrunnin" + + "g/operations.proto\032\036google/protobuf/dura" + + "tion.proto\032 google/protobuf/field_mask.p" + + "roto\032\037google/protobuf/timestamp.proto\"\315\003" + + "\n\007Cluster\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\031\n\014cl" + + "uster_name\030\002 \001(\tB\003\340A\002\022<\n\006config\030\003 \001(\0132\'." + + "google.cloud.dataproc.v1.ClusterConfigB\003" + + "\340A\002\022B\n\006labels\030\010 \003(\0132-.google.cloud.datap" + + "roc.v1.Cluster.LabelsEntryB\003\340A\001\022<\n\006statu" + + "s\030\004 \001(\0132\'.google.cloud.dataproc.v1.Clust" + + "erStatusB\003\340A\003\022D\n\016status_history\030\007 \003(\0132\'." + + "google.cloud.dataproc.v1.ClusterStatusB\003" + + "\340A\003\022\031\n\014cluster_uuid\030\006 \001(\tB\003\340A\003\022>\n\007metric" + + "s\030\t \001(\0132(.google.cloud.dataproc.v1.Clust" + + "erMetricsB\003\340A\003\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001" + + "(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\251\010\n\rClusterConfig\022" + + "\032\n\rconfig_bucket\030\001 \001(\tB\003\340A\001\022\030\n\013temp_buck" + + "et\030\002 \001(\tB\003\340A\001\022K\n\022gce_cluster_config\030\010 \001(" + + "\0132*.google.cloud.dataproc.v1.GceClusterC" + + "onfigB\003\340A\001\022I\n\rmaster_config\030\t \001(\0132-.goog" + + "le.cloud.dataproc.v1.InstanceGroupConfig" + + "B\003\340A\001\022I\n\rworker_config\030\n \001(\0132-.google.cl" + + "oud.dataproc.v1.InstanceGroupConfigB\003\340A\001" + + "\022S\n\027secondary_worker_config\030\014 \001(\0132-.goog" + + "le.cloud.dataproc.v1.InstanceGroupConfig" + + "B\003\340A\001\022F\n\017software_config\030\r \001(\0132(.google." 
+ + "cloud.dataproc.v1.SoftwareConfigB\003\340A\001\022W\n" + + "\026initialization_actions\030\013 \003(\01322.google.c" + + "loud.dataproc.v1.NodeInitializationActio" + + "nB\003\340A\001\022J\n\021encryption_config\030\017 \001(\0132*.goog" + + "le.cloud.dataproc.v1.EncryptionConfigB\003\340" + + "A\001\022L\n\022autoscaling_config\030\022 \001(\0132+.google." + + "cloud.dataproc.v1.AutoscalingConfigB\003\340A\001" + + "\022F\n\017security_config\030\020 \001(\0132(.google.cloud" + + ".dataproc.v1.SecurityConfigB\003\340A\001\022H\n\020life" + + "cycle_config\030\021 \001(\0132).google.cloud.datapr" + + "oc.v1.LifecycleConfigB\003\340A\001\022F\n\017endpoint_c" + + "onfig\030\023 \001(\0132(.google.cloud.dataproc.v1.E" + + "ndpointConfigB\003\340A\001\022H\n\020metastore_config\030\024" + + " \001(\0132).google.cloud.dataproc.v1.Metastor" + + "eConfigB\003\340A\001\022K\n\022gke_cluster_config\030\025 \001(\013" + + "2*.google.cloud.dataproc.v1.GkeClusterCo" + + "nfigB\003\340A\001\"\223\002\n\020GkeClusterConfig\022w\n namesp" + + "aced_gke_deployment_target\030\001 \001(\0132H.googl" + + "e.cloud.dataproc.v1.GkeClusterConfig.Nam" + + "espacedGkeDeploymentTargetB\003\340A\001\032\205\001\n\035Name" + + "spacedGkeDeploymentTarget\022D\n\022target_gke_" + + "cluster\030\001 \001(\tB(\340A\001\372A\"\n container.googlea" + + "pis.com/Cluster\022\036\n\021cluster_namespace\030\002 \001" + + "(\tB\003\340A\001\"\272\001\n\016EndpointConfig\022P\n\nhttp_ports" + + "\030\001 \003(\01327.google.cloud.dataproc.v1.Endpoi" + + "ntConfig.HttpPortsEntryB\003\340A\003\022$\n\027enable_h" + + "ttp_port_access\030\002 \001(\010B\003\340A\001\0320\n\016HttpPortsE" + + "ntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\",\n\021" + + "AutoscalingConfig\022\027\n\npolicy_uri\030\001 \001(\tB\003\340" + + "A\001\"4\n\020EncryptionConfig\022 \n\023gce_pd_kms_key" + + "_name\030\001 \001(\tB\003\340A\001\"\272\006\n\020GceClusterConfig\022\025\n" + + 
"\010zone_uri\030\001 \001(\tB\003\340A\001\022\030\n\013network_uri\030\002 \001(" + + "\tB\003\340A\001\022\033\n\016subnetwork_uri\030\006 \001(\tB\003\340A\001\022\035\n\020i" + + "nternal_ip_only\030\007 \001(\010B\003\340A\001\022k\n\032private_ip" + + "v6_google_access\030\014 \001(\0162B.google.cloud.da" + + "taproc.v1.GceClusterConfig.PrivateIpv6Go" + + "ogleAccessB\003\340A\001\022\034\n\017service_account\030\010 \001(\t" + + "B\003\340A\001\022#\n\026service_account_scopes\030\003 \003(\tB\003\340" + + "A\001\022\014\n\004tags\030\004 \003(\t\022J\n\010metadata\030\005 \003(\01328.goo" + + "gle.cloud.dataproc.v1.GceClusterConfig.M" + + "etadataEntry\022P\n\024reservation_affinity\030\013 \001" + + "(\0132-.google.cloud.dataproc.v1.Reservatio" + + "nAffinityB\003\340A\001\022M\n\023node_group_affinity\030\r " + + "\001(\0132+.google.cloud.dataproc.v1.NodeGroup" + + "AffinityB\003\340A\001\022W\n\030shielded_instance_confi" + + "g\030\016 \001(\01320.google.cloud.dataproc.v1.Shiel" + + "dedInstanceConfigB\003\340A\001\032/\n\rMetadataEntry\022" + + "\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\203\001\n\027Priv" + + "ateIpv6GoogleAccess\022*\n&PRIVATE_IPV6_GOOG" + + "LE_ACCESS_UNSPECIFIED\020\000\022\033\n\027INHERIT_FROM_" + + "SUBNETWORK\020\001\022\014\n\010OUTBOUND\020\002\022\021\n\rBIDIRECTIO" + + "NAL\020\003\"0\n\021NodeGroupAffinity\022\033\n\016node_group" + + "_uri\030\001 \001(\tB\003\340A\002\"}\n\026ShieldedInstanceConfi" + + "g\022\037\n\022enable_secure_boot\030\001 \001(\010B\003\340A\001\022\030\n\013en" + + "able_vtpm\030\002 \001(\010B\003\340A\001\022(\n\033enable_integrity" + + "_monitoring\030\003 \001(\010B\003\340A\001\"\315\004\n\023InstanceGroup" + + "Config\022\032\n\rnum_instances\030\001 \001(\005B\003\340A\001\022\033\n\016in" + + "stance_names\030\002 \003(\tB\003\340A\003\022\026\n\timage_uri\030\003 \001" + + "(\tB\003\340A\001\022\035\n\020machine_type_uri\030\004 \001(\tB\003\340A\001\022>" 
+ + "\n\013disk_config\030\005 \001(\0132$.google.cloud.datap" + + "roc.v1.DiskConfigB\003\340A\001\022\033\n\016is_preemptible" + + "\030\006 \001(\010B\003\340A\003\022Y\n\016preemptibility\030\n \001(\0162<.go" + + "ogle.cloud.dataproc.v1.InstanceGroupConf" + + "ig.PreemptibilityB\003\340A\001\022O\n\024managed_group_" + + "config\030\007 \001(\0132,.google.cloud.dataproc.v1." + + "ManagedGroupConfigB\003\340A\003\022F\n\014accelerators\030" + + "\010 \003(\0132+.google.cloud.dataproc.v1.Acceler" + + "atorConfigB\003\340A\001\022\035\n\020min_cpu_platform\030\t \001(" + + "\tB\003\340A\001\"V\n\016Preemptibility\022\036\n\032PREEMPTIBILI" + + "TY_UNSPECIFIED\020\000\022\023\n\017NON_PREEMPTIBLE\020\001\022\017\n" + + "\013PREEMPTIBLE\020\002\"c\n\022ManagedGroupConfig\022#\n\026" + + "instance_template_name\030\001 \001(\tB\003\340A\003\022(\n\033ins" + + "tance_group_manager_name\030\002 \001(\tB\003\340A\003\"L\n\021A" + + "cceleratorConfig\022\034\n\024accelerator_type_uri" + + "\030\001 \001(\t\022\031\n\021accelerator_count\030\002 \001(\005\"f\n\nDis" + + "kConfig\022\033\n\016boot_disk_type\030\003 \001(\tB\003\340A\001\022\036\n\021" + + "boot_disk_size_gb\030\001 \001(\005B\003\340A\001\022\033\n\016num_loca" + + "l_ssds\030\002 \001(\005B\003\340A\001\"s\n\030NodeInitializationA" + + "ction\022\034\n\017executable_file\030\001 \001(\tB\003\340A\002\0229\n\021e" + + "xecution_timeout\030\002 \001(\0132\031.google.protobuf" + + ".DurationB\003\340A\001\"\255\003\n\rClusterStatus\022A\n\005stat" + + "e\030\001 \001(\0162-.google.cloud.dataproc.v1.Clust" + + "erStatus.StateB\003\340A\003\022\026\n\006detail\030\002 \001(\tB\006\340A\003" + + "\340A\001\0229\n\020state_start_time\030\003 \001(\0132\032.google.p" + + "rotobuf.TimestampB\003\340A\003\022G\n\010substate\030\004 \001(\016" + + "20.google.cloud.dataproc.v1.ClusterStatu" + + "s.SubstateB\003\340A\003\"\177\n\005State\022\013\n\007UNKNOWN\020\000\022\014\n" + + 
"\010CREATING\020\001\022\013\n\007RUNNING\020\002\022\t\n\005ERROR\020\003\022\014\n\010D" + + "ELETING\020\004\022\014\n\010UPDATING\020\005\022\014\n\010STOPPING\020\006\022\013\n" + + "\007STOPPED\020\007\022\014\n\010STARTING\020\010\"<\n\010Substate\022\017\n\013" + + "UNSPECIFIED\020\000\022\r\n\tUNHEALTHY\020\001\022\020\n\014STALE_ST" + + "ATUS\020\002\"\240\001\n\016SecurityConfig\022F\n\017kerberos_co" + + "nfig\030\001 \001(\0132(.google.cloud.dataproc.v1.Ke" + + "rberosConfigB\003\340A\001\022F\n\017identity_config\030\002 \001" + + "(\0132(.google.cloud.dataproc.v1.IdentityCo" + + "nfigB\003\340A\001\"\220\004\n\016KerberosConfig\022\034\n\017enable_k" + + "erberos\030\001 \001(\010B\003\340A\001\022(\n\033root_principal_pas" + + "sword_uri\030\002 \001(\tB\003\340A\001\022\030\n\013kms_key_uri\030\003 \001(" + + "\tB\003\340A\001\022\031\n\014keystore_uri\030\004 \001(\tB\003\340A\001\022\033\n\016tru" + + "ststore_uri\030\005 \001(\tB\003\340A\001\022\"\n\025keystore_passw" + + "ord_uri\030\006 \001(\tB\003\340A\001\022\035\n\020key_password_uri\030\007" + + " \001(\tB\003\340A\001\022$\n\027truststore_password_uri\030\010 \001" + + "(\tB\003\340A\001\022$\n\027cross_realm_trust_realm\030\t \001(\t" + + "B\003\340A\001\022\"\n\025cross_realm_trust_kdc\030\n \001(\tB\003\340A" + + "\001\022+\n\036cross_realm_trust_admin_server\030\013 \001(" + + "\tB\003\340A\001\0222\n%cross_realm_trust_shared_passw" + + "ord_uri\030\014 \001(\tB\003\340A\001\022\033\n\016kdc_db_key_uri\030\r \001" + + "(\tB\003\340A\001\022\037\n\022tgt_lifetime_hours\030\016 \001(\005B\003\340A\001" + + "\022\022\n\005realm\030\017 \001(\tB\003\340A\001\"\306\001\n\016IdentityConfig\022" + + "r\n\034user_service_account_mapping\030\001 \003(\0132G." + + "google.cloud.dataproc.v1.IdentityConfig." 
+ + "UserServiceAccountMappingEntryB\003\340A\002\032@\n\036U" + + "serServiceAccountMappingEntry\022\013\n\003key\030\001 \001" + + "(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\371\001\n\016SoftwareConfig" + + "\022\032\n\rimage_version\030\001 \001(\tB\003\340A\001\022Q\n\nproperti" + + "es\030\002 \003(\01328.google.cloud.dataproc.v1.Soft" + + "wareConfig.PropertiesEntryB\003\340A\001\022E\n\023optio" + + "nal_components\030\003 \003(\0162#.google.cloud.data" + + "proc.v1.ComponentB\003\340A\001\0321\n\017PropertiesEntr" + + "y\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\203\002\n\017Li" + + "fecycleConfig\0227\n\017idle_delete_ttl\030\001 \001(\0132\031" + + ".google.protobuf.DurationB\003\340A\001\022;\n\020auto_d" + + "elete_time\030\002 \001(\0132\032.google.protobuf.Times" + + "tampB\003\340A\001H\000\0229\n\017auto_delete_ttl\030\003 \001(\0132\031.g" + + "oogle.protobuf.DurationB\003\340A\001H\000\0228\n\017idle_s" + + "tart_time\030\004 \001(\0132\032.google.protobuf.Timest" + + "ampB\003\340A\003B\005\n\003ttl\"_\n\017MetastoreConfig\022L\n\032da" + + "taproc_metastore_service\030\001 \001(\tB(\340A\002\372A\"\n " + + "metastore.googleapis.com/Service\"\232\002\n\016Clu" + + "sterMetrics\022O\n\014hdfs_metrics\030\001 \003(\01329.goog" + + "le.cloud.dataproc.v1.ClusterMetrics.Hdfs" + + "MetricsEntry\022O\n\014yarn_metrics\030\002 \003(\01329.goo" + + "gle.cloud.dataproc.v1.ClusterMetrics.Yar" + + "nMetricsEntry\0322\n\020HdfsMetricsEntry\022\013\n\003key" + + "\030\001 \001(\t\022\r\n\005value\030\002 \001(\003:\0028\001\0322\n\020YarnMetrics" + + "Entry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\003:\0028\001\"\226\001" + + "\n\024CreateClusterRequest\022\027\n\nproject_id\030\001 \001" + + "(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\0227\n\007cluster\030" + + "\002 \001(\0132!.google.cloud.dataproc.v1.Cluster" + + "B\003\340A\002\022\027\n\nrequest_id\030\004 
\001(\tB\003\340A\001\"\256\002\n\024Updat" + + "eClusterRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002" + + "\022\023\n\006region\030\005 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 " + + "\001(\tB\003\340A\002\0227\n\007cluster\030\003 \001(\0132!.google.cloud" + + ".dataproc.v1.ClusterB\003\340A\002\022E\n\035graceful_de" + + "commission_timeout\030\006 \001(\0132\031.google.protob" + + "uf.DurationB\003\340A\001\0224\n\013update_mask\030\004 \001(\0132\032." + + "google.protobuf.FieldMaskB\003\340A\002\022\027\n\nreques" + + "t_id\030\007 \001(\tB\003\340A\001\"\221\001\n\022StopClusterRequest\022\027" + + "\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\002 \001(\tB" + + "\003\340A\002\022\031\n\014cluster_name\030\003 \001(\tB\003\340A\002\022\031\n\014clust" + + "er_uuid\030\004 \001(\tB\003\340A\001\022\027\n\nrequest_id\030\005 \001(\tB\003" + + "\340A\001\"\222\001\n\023StartClusterRequest\022\027\n\nproject_i" + + "d\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\002 \001(\tB\003\340A\002\022\031\n\014clu" + + "ster_name\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_uuid\030\004 \001" + + "(\tB\003\340A\001\022\027\n\nrequest_id\030\005 \001(\tB\003\340A\001\"\223\001\n\024Del" + + "eteClusterRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340" + + "A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_name\030" + + "\002 \001(\tB\003\340A\002\022\031\n\014cluster_uuid\030\004 \001(\tB\003\340A\001\022\027\n" + + "\nrequest_id\030\005 \001(\tB\003\340A\001\"\\\n\021GetClusterRequ" + + "est\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003" + + " \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\"\211\001\n" + + "\023ListClustersRequest\022\027\n\nproject_id\030\001 \001(\t" + + "B\003\340A\002\022\023\n\006region\030\004 \001(\tB\003\340A\002\022\023\n\006filter\030\005 \001" + + 
"(\tB\003\340A\001\022\026\n\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\npage_" + + "token\030\003 \001(\tB\003\340A\001\"n\n\024ListClustersResponse" + + "\0228\n\010clusters\030\001 \003(\0132!.google.cloud.datapr" + + "oc.v1.ClusterB\003\340A\003\022\034\n\017next_page_token\030\002 " + + "\001(\tB\003\340A\003\"a\n\026DiagnoseClusterRequest\022\027\n\npr" + + "oject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002" + + "\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\"1\n\026DiagnoseC" + + "lusterResults\022\027\n\noutput_uri\030\001 \001(\tB\003\340A\003\"\370" + + "\001\n\023ReservationAffinity\022Y\n\030consume_reserv" + + "ation_type\030\001 \001(\01622.google.cloud.dataproc" + + ".v1.ReservationAffinity.TypeB\003\340A\001\022\020\n\003key" + + "\030\002 \001(\tB\003\340A\001\022\023\n\006values\030\003 \003(\tB\003\340A\001\"_\n\004Type" + + "\022\024\n\020TYPE_UNSPECIFIED\020\000\022\022\n\016NO_RESERVATION" + + "\020\001\022\023\n\017ANY_RESERVATION\020\002\022\030\n\024SPECIFIC_RESE" + + "RVATION\020\0032\344\020\n\021ClusterController\022\200\002\n\rCrea" + + "teCluster\022..google.cloud.dataproc.v1.Cre" + + "ateClusterRequest\032\035.google.longrunning.O" + + "peration\"\237\001\202\323\344\223\002>\"3/v1/projects/{project" + + "_id}/regions/{region}/clusters:\007cluster\332" + + "A\031project_id,region,cluster\312A<\n\007Cluster\022" + + "1google.cloud.dataproc.v1.ClusterOperati" + + "onMetadata\022\250\002\n\rUpdateCluster\022..google.cl" + + "oud.dataproc.v1.UpdateClusterRequest\032\035.g" + + "oogle.longrunning.Operation\"\307\001\202\323\344\223\002M2B/v" + + "1/projects/{project_id}/regions/{region}" + + "/clusters/{cluster_name}:\007cluster\332A2proj" + + "ect_id,region,cluster_name,cluster,updat" + + "e_mask\312A<\n\007Cluster\0221google.cloud.datapro" + + "c.v1.ClusterOperationMetadata\022\356\001\n\013StopCl" + + "uster\022,.google.cloud.dataproc.v1.StopClu" + + 
"sterRequest\032\035.google.longrunning.Operati" + + "on\"\221\001\202\323\344\223\002L\"G/v1/projects/{project_id}/r" + + "egions/{region}/clusters/{cluster_name}:" + + "stop:\001*\312A<\n\007Cluster\0221google.cloud.datapr" + + "oc.v1.ClusterOperationMetadata\022\361\001\n\014Start" + + "Cluster\022-.google.cloud.dataproc.v1.Start" + + "ClusterRequest\032\035.google.longrunning.Oper" + + "ation\"\222\001\202\323\344\223\002M\"H/v1/projects/{project_id" + + "}/regions/{region}/clusters/{cluster_nam" + + "e}:start:\001*\312A<\n\007Cluster\0221google.cloud.da" + + "taproc.v1.ClusterOperationMetadata\022\231\002\n\rD" + + "eleteCluster\022..google.cloud.dataproc.v1." + + "DeleteClusterRequest\032\035.google.longrunnin" + + "g.Operation\"\270\001\202\323\344\223\002D*B/v1/projects/{proj" + + "ect_id}/regions/{region}/clusters/{clust" + + "er_name}\332A\036project_id,region,cluster_nam" + + "e\312AJ\n\025google.protobuf.Empty\0221google.clou" + + "d.dataproc.v1.ClusterOperationMetadata\022\311" + + "\001\n\nGetCluster\022+.google.cloud.dataproc.v1" + + ".GetClusterRequest\032!.google.cloud.datapr" + + "oc.v1.Cluster\"k\202\323\344\223\002D\022B/v1/projects/{pro" + + "ject_id}/regions/{region}/clusters/{clus" + + "ter_name}\332A\036project_id,region,cluster_na" + + "me\022\331\001\n\014ListClusters\022-.google.cloud.datap" + + "roc.v1.ListClustersRequest\032..google.clou" + + "d.dataproc.v1.ListClustersResponse\"j\202\323\344\223" + + "\0025\0223/v1/projects/{project_id}/regions/{r" + + "egion}/clusters\332A\021project_id,region\332A\030pr" + + "oject_id,region,filter\022\252\002\n\017DiagnoseClust" + + "er\0220.google.cloud.dataproc.v1.DiagnoseCl" + + "usterRequest\032\035.google.longrunning.Operat" + + "ion\"\305\001\202\323\344\223\002P\"K/v1/projects/{project_id}/" + + "regions/{region}/clusters/{cluster_name}" + + ":diagnose:\001*\332A\036project_id,region,cluster" + + "_name\312AK\n\026DiagnoseClusterResults\0221google" + + 
".cloud.dataproc.v1.ClusterOperationMetad" + + "ata\032K\312A\027dataproc.googleapis.com\322A.https:" + + "//www.googleapis.com/auth/cloud-platform" + + "B\263\002\n\034com.google.cloud.dataproc.v1B\rClust" + + "ersProtoP\001Z@google.golang.org/genproto/g" + + "oogleapis/cloud/dataproc/v1;dataproc\352A^\n" + + " container.googleapis.com/Cluster\022:proje" + + "cts/{project}/locations/{location}/clust" + + "ers/{cluster}\352A^\n metastore.googleapis.c" + + "om/Service\022:projects/{project}/locations" + + "/{location}/services/{service}b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -381,6 +478,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.api.AnnotationsProto.getDescriptor(), com.google.api.ClientProto.getDescriptor(), com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), com.google.cloud.dataproc.v1.SharedProto.getDescriptor(), com.google.longrunning.OperationsProto.getDescriptor(), com.google.protobuf.DurationProto.getDescriptor(), @@ -429,9 +527,29 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "SecurityConfig", "LifecycleConfig", "EndpointConfig", + "MetastoreConfig", + "GkeClusterConfig", }); - internal_static_google_cloud_dataproc_v1_EndpointConfig_descriptor = + internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor = getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_dataproc_v1_GkeClusterConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor, + new java.lang.String[] { + "NamespacedGkeDeploymentTarget", + }); + internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_descriptor = + internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor + .getNestedTypes() + .get(0); + 
internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_descriptor, + new java.lang.String[] { + "TargetGkeCluster", "ClusterNamespace", + }); + internal_static_google_cloud_dataproc_v1_EndpointConfig_descriptor = + getDescriptor().getMessageTypes().get(3); internal_static_google_cloud_dataproc_v1_EndpointConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_EndpointConfig_descriptor, @@ -447,7 +565,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Key", "Value", }); internal_static_google_cloud_dataproc_v1_AutoscalingConfig_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(4); internal_static_google_cloud_dataproc_v1_AutoscalingConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_AutoscalingConfig_descriptor, @@ -455,7 +573,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "PolicyUri", }); internal_static_google_cloud_dataproc_v1_EncryptionConfig_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(5); internal_static_google_cloud_dataproc_v1_EncryptionConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_EncryptionConfig_descriptor, @@ -463,7 +581,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "GcePdKmsKeyName", }); internal_static_google_cloud_dataproc_v1_GceClusterConfig_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(6); 
internal_static_google_cloud_dataproc_v1_GceClusterConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_GceClusterConfig_descriptor, @@ -472,11 +590,14 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "NetworkUri", "SubnetworkUri", "InternalIpOnly", + "PrivateIpv6GoogleAccess", "ServiceAccount", "ServiceAccountScopes", "Tags", "Metadata", "ReservationAffinity", + "NodeGroupAffinity", + "ShieldedInstanceConfig", }); internal_static_google_cloud_dataproc_v1_GceClusterConfig_MetadataEntry_descriptor = internal_static_google_cloud_dataproc_v1_GceClusterConfig_descriptor @@ -488,8 +609,24 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1_NodeGroupAffinity_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_dataproc_v1_NodeGroupAffinity_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_NodeGroupAffinity_descriptor, + new java.lang.String[] { + "NodeGroupUri", + }); + internal_static_google_cloud_dataproc_v1_ShieldedInstanceConfig_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_google_cloud_dataproc_v1_ShieldedInstanceConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_ShieldedInstanceConfig_descriptor, + new java.lang.String[] { + "EnableSecureBoot", "EnableVtpm", "EnableIntegrityMonitoring", + }); internal_static_google_cloud_dataproc_v1_InstanceGroupConfig_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(9); internal_static_google_cloud_dataproc_v1_InstanceGroupConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( 
internal_static_google_cloud_dataproc_v1_InstanceGroupConfig_descriptor, @@ -506,7 +643,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "MinCpuPlatform", }); internal_static_google_cloud_dataproc_v1_ManagedGroupConfig_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(10); internal_static_google_cloud_dataproc_v1_ManagedGroupConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_ManagedGroupConfig_descriptor, @@ -514,7 +651,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "InstanceTemplateName", "InstanceGroupManagerName", }); internal_static_google_cloud_dataproc_v1_AcceleratorConfig_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(11); internal_static_google_cloud_dataproc_v1_AcceleratorConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_AcceleratorConfig_descriptor, @@ -522,7 +659,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "AcceleratorTypeUri", "AcceleratorCount", }); internal_static_google_cloud_dataproc_v1_DiskConfig_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(12); internal_static_google_cloud_dataproc_v1_DiskConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_DiskConfig_descriptor, @@ -530,7 +667,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "BootDiskType", "BootDiskSizeGb", "NumLocalSsds", }); internal_static_google_cloud_dataproc_v1_NodeInitializationAction_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(13); 
internal_static_google_cloud_dataproc_v1_NodeInitializationAction_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_NodeInitializationAction_descriptor, @@ -538,7 +675,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ExecutableFile", "ExecutionTimeout", }); internal_static_google_cloud_dataproc_v1_ClusterStatus_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(14); internal_static_google_cloud_dataproc_v1_ClusterStatus_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_ClusterStatus_descriptor, @@ -546,15 +683,15 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "State", "Detail", "StateStartTime", "Substate", }); internal_static_google_cloud_dataproc_v1_SecurityConfig_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(15); internal_static_google_cloud_dataproc_v1_SecurityConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_SecurityConfig_descriptor, new java.lang.String[] { - "KerberosConfig", + "KerberosConfig", "IdentityConfig", }); internal_static_google_cloud_dataproc_v1_KerberosConfig_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(16); internal_static_google_cloud_dataproc_v1_KerberosConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_KerberosConfig_descriptor, @@ -575,8 +712,24 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "TgtLifetimeHours", "Realm", }); + internal_static_google_cloud_dataproc_v1_IdentityConfig_descriptor = + getDescriptor().getMessageTypes().get(17); + 
internal_static_google_cloud_dataproc_v1_IdentityConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_IdentityConfig_descriptor, + new java.lang.String[] { + "UserServiceAccountMapping", + }); + internal_static_google_cloud_dataproc_v1_IdentityConfig_UserServiceAccountMappingEntry_descriptor = + internal_static_google_cloud_dataproc_v1_IdentityConfig_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1_IdentityConfig_UserServiceAccountMappingEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_IdentityConfig_UserServiceAccountMappingEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); internal_static_google_cloud_dataproc_v1_SoftwareConfig_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(18); internal_static_google_cloud_dataproc_v1_SoftwareConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_SoftwareConfig_descriptor, @@ -592,15 +745,23 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Key", "Value", }); internal_static_google_cloud_dataproc_v1_LifecycleConfig_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(19); internal_static_google_cloud_dataproc_v1_LifecycleConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_LifecycleConfig_descriptor, new java.lang.String[] { "IdleDeleteTtl", "AutoDeleteTime", "AutoDeleteTtl", "IdleStartTime", "Ttl", }); + internal_static_google_cloud_dataproc_v1_MetastoreConfig_descriptor = + getDescriptor().getMessageTypes().get(20); + internal_static_google_cloud_dataproc_v1_MetastoreConfig_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_MetastoreConfig_descriptor, + new java.lang.String[] { + "DataprocMetastoreService", + }); internal_static_google_cloud_dataproc_v1_ClusterMetrics_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(21); internal_static_google_cloud_dataproc_v1_ClusterMetrics_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_ClusterMetrics_descriptor, @@ -624,7 +785,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Key", "Value", }); internal_static_google_cloud_dataproc_v1_CreateClusterRequest_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(22); internal_static_google_cloud_dataproc_v1_CreateClusterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_CreateClusterRequest_descriptor, @@ -632,7 +793,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "Cluster", "RequestId", }); internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(23); internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_descriptor, @@ -645,8 +806,24 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "UpdateMask", "RequestId", }); + internal_static_google_cloud_dataproc_v1_StopClusterRequest_descriptor = + getDescriptor().getMessageTypes().get(24); + internal_static_google_cloud_dataproc_v1_StopClusterRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + 
internal_static_google_cloud_dataproc_v1_StopClusterRequest_descriptor, + new java.lang.String[] { + "ProjectId", "Region", "ClusterName", "ClusterUuid", "RequestId", + }); + internal_static_google_cloud_dataproc_v1_StartClusterRequest_descriptor = + getDescriptor().getMessageTypes().get(25); + internal_static_google_cloud_dataproc_v1_StartClusterRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_StartClusterRequest_descriptor, + new java.lang.String[] { + "ProjectId", "Region", "ClusterName", "ClusterUuid", "RequestId", + }); internal_static_google_cloud_dataproc_v1_DeleteClusterRequest_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(26); internal_static_google_cloud_dataproc_v1_DeleteClusterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_DeleteClusterRequest_descriptor, @@ -654,7 +831,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "ClusterName", "ClusterUuid", "RequestId", }); internal_static_google_cloud_dataproc_v1_GetClusterRequest_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(27); internal_static_google_cloud_dataproc_v1_GetClusterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_GetClusterRequest_descriptor, @@ -662,7 +839,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "ClusterName", }); internal_static_google_cloud_dataproc_v1_ListClustersRequest_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(28); internal_static_google_cloud_dataproc_v1_ListClustersRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( 
internal_static_google_cloud_dataproc_v1_ListClustersRequest_descriptor, @@ -670,7 +847,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "Filter", "PageSize", "PageToken", }); internal_static_google_cloud_dataproc_v1_ListClustersResponse_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(29); internal_static_google_cloud_dataproc_v1_ListClustersResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_ListClustersResponse_descriptor, @@ -678,7 +855,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Clusters", "NextPageToken", }); internal_static_google_cloud_dataproc_v1_DiagnoseClusterRequest_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(30); internal_static_google_cloud_dataproc_v1_DiagnoseClusterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_DiagnoseClusterRequest_descriptor, @@ -686,7 +863,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "ClusterName", }); internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(31); internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_descriptor, @@ -694,7 +871,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "OutputUri", }); internal_static_google_cloud_dataproc_v1_ReservationAffinity_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(32); 
internal_static_google_cloud_dataproc_v1_ReservationAffinity_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_ReservationAffinity_descriptor, @@ -708,12 +885,15 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { registry.add(com.google.api.AnnotationsProto.http); registry.add(com.google.api.ClientProto.methodSignature); registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resourceDefinition); + registry.add(com.google.api.ResourceProto.resourceReference); registry.add(com.google.longrunning.OperationsProto.operationInfo); com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( descriptor, registry); com.google.api.AnnotationsProto.getDescriptor(); com.google.api.ClientProto.getDescriptor(); com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); com.google.cloud.dataproc.v1.SharedProto.getDescriptor(); com.google.longrunning.OperationsProto.getDescriptor(); com.google.protobuf.DurationProto.getDescriptor(); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Component.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Component.java index 8a8254b3..1f8afa86 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Component.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Component.java @@ -23,6 +23,7 @@ * *
  * Cluster components that can be activated.
+ * Next ID: 16.
  * 
* * Protobuf enum {@code google.cloud.dataproc.v1.Component} @@ -42,12 +43,56 @@ public enum Component implements com.google.protobuf.ProtocolMessageEnum { * * *
-   * The Anaconda python distribution.
+   * The Anaconda python distribution. The Anaconda component is not supported
+   * in the Dataproc
+   * <a
+   * href="/dataproc/docs/concepts/versioning/dataproc-release-2.0">2.0
+   * image</a>. The 2.0 image is pre-installed with Miniconda.
    * 
* * ANACONDA = 5; */ ANACONDA(5), + /** + * + * + *
+   * Docker
+   * 
+ * + * DOCKER = 13; + */ + DOCKER(13), + /** + * + * + *
+   * The Druid query engine. (alpha)
+   * 
+ * + * DRUID = 9; + */ + DRUID(9), + /** + * + * + *
+   * Flink
+   * 
+ * + * FLINK = 14; + */ + FLINK(14), + /** + * + * + *
+   * HBase. (beta)
+   * 
+ * + * HBASE = 11; + */ + HBASE(11), /** * * @@ -78,6 +123,26 @@ public enum Component implements com.google.protobuf.ProtocolMessageEnum { * PRESTO = 6; */ PRESTO(6), + /** + * + * + *
+   * The Ranger service.
+   * 
+ * + * RANGER = 12; + */ + RANGER(12), + /** + * + * + *
+   * The Solr service.
+   * 
+ * + * SOLR = 10; + */ + SOLR(10), /** * * @@ -115,12 +180,56 @@ public enum Component implements com.google.protobuf.ProtocolMessageEnum { * * *
-   * The Anaconda python distribution.
+   * The Anaconda python distribution. The Anaconda component is not supported
+   * in the Dataproc
+   * <a
+   * href="/dataproc/docs/concepts/versioning/dataproc-release-2.0">2.0
+   * image</a>. The 2.0 image is pre-installed with Miniconda.
    * 
* * ANACONDA = 5; */ public static final int ANACONDA_VALUE = 5; + /** + * + * + *
+   * Docker
+   * 
+ * + * DOCKER = 13; + */ + public static final int DOCKER_VALUE = 13; + /** + * + * + *
+   * The Druid query engine. (alpha)
+   * 
+ * + * DRUID = 9; + */ + public static final int DRUID_VALUE = 9; + /** + * + * + *
+   * Flink
+   * 
+ * + * FLINK = 14; + */ + public static final int FLINK_VALUE = 14; + /** + * + * + *
+   * HBase. (beta)
+   * 
+ * + * HBASE = 11; + */ + public static final int HBASE_VALUE = 11; /** * * @@ -151,6 +260,26 @@ public enum Component implements com.google.protobuf.ProtocolMessageEnum { * PRESTO = 6; */ public static final int PRESTO_VALUE = 6; + /** + * + * + *
+   * The Ranger service.
+   * 
+ * + * RANGER = 12; + */ + public static final int RANGER_VALUE = 12; + /** + * + * + *
+   * The Solr service.
+   * 
+ * + * SOLR = 10; + */ + public static final int SOLR_VALUE = 10; /** * * @@ -200,12 +329,24 @@ public static Component forNumber(int value) { return COMPONENT_UNSPECIFIED; case 5: return ANACONDA; + case 13: + return DOCKER; + case 9: + return DRUID; + case 14: + return FLINK; + case 11: + return HBASE; case 3: return HIVE_WEBHCAT; case 1: return JUPYTER; case 6: return PRESTO; + case 12: + return RANGER; + case 10: + return SOLR; case 4: return ZEPPELIN; case 8: diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequest.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequest.java index 96d8fb87..dbb97c33 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequest.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequest.java @@ -298,9 +298,9 @@ public com.google.cloud.dataproc.v1.ClusterOrBuilder getClusterOrBuilder() { * * *
-   * Optional. A unique id used to identify the request. If the server
-   * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * Optional. A unique id used to identify the request. If the server receives two
+   * [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
@@ -329,9 +329,9 @@ public java.lang.String getRequestId() {
    *
    *
    * 
-   * Optional. A unique id used to identify the request. If the server
-   * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * Optional. A unique id used to identify the request. If the server receives two
+   * [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
@@ -1160,9 +1160,9 @@ public com.google.cloud.dataproc.v1.ClusterOrBuilder getClusterOrBuilder() {
      *
      *
      * 
-     * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * Optional. A unique id used to identify the request. If the server receives two
+     * [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
@@ -1190,9 +1190,9 @@ public java.lang.String getRequestId() {
      *
      *
      * 
-     * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * Optional. A unique id used to identify the request. If the server receives two
+     * [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
@@ -1220,9 +1220,9 @@ public com.google.protobuf.ByteString getRequestIdBytes() {
      *
      *
      * 
-     * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * Optional. A unique id used to identify the request. If the server receives two
+     * [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
@@ -1249,9 +1249,9 @@ public Builder setRequestId(java.lang.String value) {
      *
      *
      * 
-     * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * Optional. A unique id used to identify the request. If the server receives two
+     * [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
@@ -1274,9 +1274,9 @@ public Builder clearRequestId() {
      *
      *
      * 
-     * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * Optional. A unique id used to identify the request. If the server receives two
+     * [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequestOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequestOrBuilder.java
index 699e931a..9e5a0bdd 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequestOrBuilder.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequestOrBuilder.java
@@ -117,9 +117,9 @@ public interface CreateClusterRequestOrBuilder
    *
    *
    * 
-   * Optional. A unique id used to identify the request. If the server
-   * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * Optional. A unique id used to identify the request. If the server receives two
+   * [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
@@ -137,9 +137,9 @@ public interface CreateClusterRequestOrBuilder
    *
    *
    * 
-   * Optional. A unique id used to identify the request. If the server
-   * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * Optional. A unique id used to identify the request. If the server receives two
+   * [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequest.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequest.java
index cb49ccee..0ae15440 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequest.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequest.java
@@ -350,8 +350,9 @@ public com.google.protobuf.ByteString getClusterUuidBytes() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two
+   * [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
    * backend is returned.
    * It is recommended to always set this value to a
@@ -381,8 +382,9 @@ public java.lang.String getRequestId() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two
+   * [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
    * backend is returned.
    * It is recommended to always set this value to a
@@ -1230,8 +1232,9 @@ public Builder setClusterUuidBytes(com.google.protobuf.ByteString value) {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
      * backend is returned.
      * It is recommended to always set this value to a
@@ -1260,8 +1263,9 @@ public java.lang.String getRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
      * backend is returned.
      * It is recommended to always set this value to a
@@ -1290,8 +1294,9 @@ public com.google.protobuf.ByteString getRequestIdBytes() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
      * backend is returned.
      * It is recommended to always set this value to a
@@ -1319,8 +1324,9 @@ public Builder setRequestId(java.lang.String value) {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
      * backend is returned.
      * It is recommended to always set this value to a
@@ -1344,8 +1350,9 @@ public Builder clearRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
      * backend is returned.
      * It is recommended to always set this value to a
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequestOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequestOrBuilder.java
index 532cf74d..04b305eb 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequestOrBuilder.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequestOrBuilder.java
@@ -132,8 +132,9 @@ public interface DeleteClusterRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two
+   * [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
    * backend is returned.
    * It is recommended to always set this value to a
@@ -152,8 +153,9 @@ public interface DeleteClusterRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two
+   * [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
    * backend is returned.
    * It is recommended to always set this value to a
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfig.java
index 60cd4baf..4e39c6bc 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfig.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfig.java
@@ -128,8 +128,10 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    * 
    * Optional. Type of the boot disk (default is "pd-standard").
-   * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
-   * "pd-standard" (Persistent Disk Hard Disk Drive).
+   * Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive),
+   * "pd-ssd" (Persistent Disk Solid State Drive),
+   * or "pd-standard" (Persistent Disk Hard Disk Drive).
+   * See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).
    * 
* * string boot_disk_type = 3 [(.google.api.field_behavior) = OPTIONAL]; @@ -153,8 +155,10 @@ public java.lang.String getBootDiskType() { * *
    * Optional. Type of the boot disk (default is "pd-standard").
-   * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
-   * "pd-standard" (Persistent Disk Hard Disk Drive).
+   * Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive),
+   * "pd-ssd" (Persistent Disk Solid State Drive),
+   * or "pd-standard" (Persistent Disk Hard Disk Drive).
+   * See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).
    * 
* * string boot_disk_type = 3 [(.google.api.field_behavior) = OPTIONAL]; @@ -566,8 +570,10 @@ public Builder mergeFrom( * *
      * Optional. Type of the boot disk (default is "pd-standard").
-     * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
-     * "pd-standard" (Persistent Disk Hard Disk Drive).
+     * Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive),
+     * "pd-ssd" (Persistent Disk Solid State Drive),
+     * or "pd-standard" (Persistent Disk Hard Disk Drive).
+     * See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).
      * 
* * string boot_disk_type = 3 [(.google.api.field_behavior) = OPTIONAL]; @@ -590,8 +596,10 @@ public java.lang.String getBootDiskType() { * *
      * Optional. Type of the boot disk (default is "pd-standard").
-     * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
-     * "pd-standard" (Persistent Disk Hard Disk Drive).
+     * Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive),
+     * "pd-ssd" (Persistent Disk Solid State Drive),
+     * or "pd-standard" (Persistent Disk Hard Disk Drive).
+     * See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).
      * 
* * string boot_disk_type = 3 [(.google.api.field_behavior) = OPTIONAL]; @@ -614,8 +622,10 @@ public com.google.protobuf.ByteString getBootDiskTypeBytes() { * *
      * Optional. Type of the boot disk (default is "pd-standard").
-     * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
-     * "pd-standard" (Persistent Disk Hard Disk Drive).
+     * Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive),
+     * "pd-ssd" (Persistent Disk Solid State Drive),
+     * or "pd-standard" (Persistent Disk Hard Disk Drive).
+     * See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).
      * 
* * string boot_disk_type = 3 [(.google.api.field_behavior) = OPTIONAL]; @@ -637,8 +647,10 @@ public Builder setBootDiskType(java.lang.String value) { * *
      * Optional. Type of the boot disk (default is "pd-standard").
-     * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
-     * "pd-standard" (Persistent Disk Hard Disk Drive).
+     * Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive),
+     * "pd-ssd" (Persistent Disk Solid State Drive),
+     * or "pd-standard" (Persistent Disk Hard Disk Drive).
+     * See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).
      * 
* * string boot_disk_type = 3 [(.google.api.field_behavior) = OPTIONAL]; @@ -656,8 +668,10 @@ public Builder clearBootDiskType() { * *
      * Optional. Type of the boot disk (default is "pd-standard").
-     * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
-     * "pd-standard" (Persistent Disk Hard Disk Drive).
+     * Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive),
+     * "pd-ssd" (Persistent Disk Solid State Drive),
+     * or "pd-standard" (Persistent Disk Hard Disk Drive).
+     * See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).
      * 
* * string boot_disk_type = 3 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfigOrBuilder.java index df7d411f..fb52a5c3 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfigOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfigOrBuilder.java @@ -28,8 +28,10 @@ public interface DiskConfigOrBuilder * *
    * Optional. Type of the boot disk (default is "pd-standard").
-   * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
-   * "pd-standard" (Persistent Disk Hard Disk Drive).
+   * Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive),
+   * "pd-ssd" (Persistent Disk Solid State Drive),
+   * or "pd-standard" (Persistent Disk Hard Disk Drive).
+   * See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).
    * 
* * string boot_disk_type = 3 [(.google.api.field_behavior) = OPTIONAL]; @@ -42,8 +44,10 @@ public interface DiskConfigOrBuilder * *
    * Optional. Type of the boot disk (default is "pd-standard").
-   * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
-   * "pd-standard" (Persistent Disk Hard Disk Drive).
+   * Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive),
+   * "pd-ssd" (Persistent Disk Solid State Drive),
+   * or "pd-standard" (Persistent Disk Hard Disk Drive).
+   * See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).
    * 
* * string boot_disk_type = 3 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java index 9ec84a3d..3a5f8034 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java @@ -42,6 +42,7 @@ private GceClusterConfig() { zoneUri_ = ""; networkUri_ = ""; subnetworkUri_ = ""; + privateIpv6GoogleAccess_ = 0; serviceAccount_ = ""; serviceAccountScopes_ = com.google.protobuf.LazyStringArrayList.EMPTY; tags_ = com.google.protobuf.LazyStringArrayList.EMPTY; @@ -159,6 +160,46 @@ private GceClusterConfig( reservationAffinity_ = subBuilder.buildPartial(); } + break; + } + case 96: + { + int rawValue = input.readEnum(); + + privateIpv6GoogleAccess_ = rawValue; + break; + } + case 106: + { + com.google.cloud.dataproc.v1.NodeGroupAffinity.Builder subBuilder = null; + if (nodeGroupAffinity_ != null) { + subBuilder = nodeGroupAffinity_.toBuilder(); + } + nodeGroupAffinity_ = + input.readMessage( + com.google.cloud.dataproc.v1.NodeGroupAffinity.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(nodeGroupAffinity_); + nodeGroupAffinity_ = subBuilder.buildPartial(); + } + + break; + } + case 114: + { + com.google.cloud.dataproc.v1.ShieldedInstanceConfig.Builder subBuilder = null; + if (shieldedInstanceConfig_ != null) { + subBuilder = shieldedInstanceConfig_.toBuilder(); + } + shieldedInstanceConfig_ = + input.readMessage( + com.google.cloud.dataproc.v1.ShieldedInstanceConfig.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(shieldedInstanceConfig_); + shieldedInstanceConfig_ = subBuilder.buildPartial(); + } + break; } default: @@ -212,6 +253,202 @@ protected 
com.google.protobuf.MapField internalGetMapField(int number) { com.google.cloud.dataproc.v1.GceClusterConfig.Builder.class); } + /** + * + * + *
+   * `PrivateIpv6GoogleAccess` controls whether and how Dataproc cluster nodes
+   * can communicate with Google Services through gRPC over IPv6.
+   * These values are directly mapped to corresponding values in the
+   * [Compute Engine Instance
+   * fields](https://cloud.google.com/compute/docs/reference/rest/v1/instances).
+   * 
+ * + * Protobuf enum {@code google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess} + */ + public enum PrivateIpv6GoogleAccess implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * If unspecified, Compute Engine default behavior will apply, which
+     * is the same as [INHERIT_FROM_SUBNETWORK][google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess.INHERIT_FROM_SUBNETWORK].
+     * 
+ * + * PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED = 0; + */ + PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED(0), + /** + * + * + *
+     * Private access to and from Google Services configuration
+     * inherited from the subnetwork configuration. This is the
+     * default Compute Engine behavior.
+     * 
+ * + * INHERIT_FROM_SUBNETWORK = 1; + */ + INHERIT_FROM_SUBNETWORK(1), + /** + * + * + *
+     * Enables outbound private IPv6 access to Google Services from the Dataproc
+     * cluster.
+     * 
+ * + * OUTBOUND = 2; + */ + OUTBOUND(2), + /** + * + * + *
+     * Enables bidirectional private IPv6 access between Google Services and the
+     * Dataproc cluster.
+     * 
+ * + * BIDIRECTIONAL = 3; + */ + BIDIRECTIONAL(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * If unspecified, Compute Engine default behavior will apply, which
+     * is the same as [INHERIT_FROM_SUBNETWORK][google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess.INHERIT_FROM_SUBNETWORK].
+     * 
+ * + * PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED = 0; + */ + public static final int PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+     * Private access to and from Google Services configuration
+     * inherited from the subnetwork configuration. This is the
+     * default Compute Engine behavior.
+     * 
+ * + * INHERIT_FROM_SUBNETWORK = 1; + */ + public static final int INHERIT_FROM_SUBNETWORK_VALUE = 1; + /** + * + * + *
+     * Enables outbound private IPv6 access to Google Services from the Dataproc
+     * cluster.
+     * 
+ * + * OUTBOUND = 2; + */ + public static final int OUTBOUND_VALUE = 2; + /** + * + * + *
+     * Enables bidirectional private IPv6 access between Google Services and the
+     * Dataproc cluster.
+     * 
+ * + * BIDIRECTIONAL = 3; + */ + public static final int BIDIRECTIONAL_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static PrivateIpv6GoogleAccess valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static PrivateIpv6GoogleAccess forNumber(int value) { + switch (value) { + case 0: + return PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED; + case 1: + return INHERIT_FROM_SUBNETWORK; + case 2: + return OUTBOUND; + case 3: + return BIDIRECTIONAL; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public PrivateIpv6GoogleAccess findValueByNumber(int number) { + return PrivateIpv6GoogleAccess.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return 
com.google.cloud.dataproc.v1.GceClusterConfig.getDescriptor().getEnumTypes().get(0); + } + + private static final PrivateIpv6GoogleAccess[] VALUES = values(); + + public static PrivateIpv6GoogleAccess valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private PrivateIpv6GoogleAccess(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess) + } + public static final int ZONE_URI_FIELD_NUMBER = 1; private volatile java.lang.Object zoneUri_; /** @@ -426,6 +663,50 @@ public boolean getInternalIpOnly() { return internalIpOnly_; } + public static final int PRIVATE_IPV6_GOOGLE_ACCESS_FIELD_NUMBER = 12; + private int privateIpv6GoogleAccess_; + /** + * + * + *
+   * Optional. The type of IPv6 access for a cluster.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess private_ipv6_google_access = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for privateIpv6GoogleAccess. + */ + @java.lang.Override + public int getPrivateIpv6GoogleAccessValue() { + return privateIpv6GoogleAccess_; + } + /** + * + * + *
+   * Optional. The type of IPv6 access for a cluster.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess private_ipv6_google_access = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The privateIpv6GoogleAccess. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess + getPrivateIpv6GoogleAccess() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess result = + com.google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess.valueOf( + privateIpv6GoogleAccess_); + return result == null + ? com.google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess.UNRECOGNIZED + : result; + } + public static final int SERVICE_ACCOUNT_FIELD_NUMBER = 8; private volatile java.lang.Object serviceAccount_; /** @@ -825,6 +1106,118 @@ public com.google.cloud.dataproc.v1.ReservationAffinity getReservationAffinity() return getReservationAffinity(); } + public static final int NODE_GROUP_AFFINITY_FIELD_NUMBER = 13; + private com.google.cloud.dataproc.v1.NodeGroupAffinity nodeGroupAffinity_; + /** + * + * + *
+   * Optional. Node Group Affinity for sole-tenant clusters.
+   * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the nodeGroupAffinity field is set. + */ + @java.lang.Override + public boolean hasNodeGroupAffinity() { + return nodeGroupAffinity_ != null; + } + /** + * + * + *
+   * Optional. Node Group Affinity for sole-tenant clusters.
+   * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The nodeGroupAffinity. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.NodeGroupAffinity getNodeGroupAffinity() { + return nodeGroupAffinity_ == null + ? com.google.cloud.dataproc.v1.NodeGroupAffinity.getDefaultInstance() + : nodeGroupAffinity_; + } + /** + * + * + *
+   * Optional. Node Group Affinity for sole-tenant clusters.
+   * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.NodeGroupAffinityOrBuilder getNodeGroupAffinityOrBuilder() { + return getNodeGroupAffinity(); + } + + public static final int SHIELDED_INSTANCE_CONFIG_FIELD_NUMBER = 14; + private com.google.cloud.dataproc.v1.ShieldedInstanceConfig shieldedInstanceConfig_; + /** + * + * + *
+   * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+   * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+   * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the shieldedInstanceConfig field is set. + */ + @java.lang.Override + public boolean hasShieldedInstanceConfig() { + return shieldedInstanceConfig_ != null; + } + /** + * + * + *
+   * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+   * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+   * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The shieldedInstanceConfig. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.ShieldedInstanceConfig getShieldedInstanceConfig() { + return shieldedInstanceConfig_ == null + ? com.google.cloud.dataproc.v1.ShieldedInstanceConfig.getDefaultInstance() + : shieldedInstanceConfig_; + } + /** + * + * + *
+   * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+   * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+   * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.ShieldedInstanceConfigOrBuilder + getShieldedInstanceConfigOrBuilder() { + return getShieldedInstanceConfig(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -866,6 +1259,18 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (reservationAffinity_ != null) { output.writeMessage(11, getReservationAffinity()); } + if (privateIpv6GoogleAccess_ + != com.google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess + .PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED + .getNumber()) { + output.writeEnum(12, privateIpv6GoogleAccess_); + } + if (nodeGroupAffinity_ != null) { + output.writeMessage(13, getNodeGroupAffinity()); + } + if (shieldedInstanceConfig_ != null) { + output.writeMessage(14, getShieldedInstanceConfig()); + } unknownFields.writeTo(output); } @@ -920,6 +1325,19 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream.computeMessageSize(11, getReservationAffinity()); } + if (privateIpv6GoogleAccess_ + != com.google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess + .PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(12, privateIpv6GoogleAccess_); + } + if (nodeGroupAffinity_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(13, getNodeGroupAffinity()); + } + if (shieldedInstanceConfig_ != null) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(14, getShieldedInstanceConfig()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -940,6 +1358,7 @@ public boolean equals(final java.lang.Object obj) { if (!getNetworkUri().equals(other.getNetworkUri())) return false; if (!getSubnetworkUri().equals(other.getSubnetworkUri())) 
return false; if (getInternalIpOnly() != other.getInternalIpOnly()) return false; + if (privateIpv6GoogleAccess_ != other.privateIpv6GoogleAccess_) return false; if (!getServiceAccount().equals(other.getServiceAccount())) return false; if (!getServiceAccountScopesList().equals(other.getServiceAccountScopesList())) return false; if (!getTagsList().equals(other.getTagsList())) return false; @@ -948,6 +1367,14 @@ public boolean equals(final java.lang.Object obj) { if (hasReservationAffinity()) { if (!getReservationAffinity().equals(other.getReservationAffinity())) return false; } + if (hasNodeGroupAffinity() != other.hasNodeGroupAffinity()) return false; + if (hasNodeGroupAffinity()) { + if (!getNodeGroupAffinity().equals(other.getNodeGroupAffinity())) return false; + } + if (hasShieldedInstanceConfig() != other.hasShieldedInstanceConfig()) return false; + if (hasShieldedInstanceConfig()) { + if (!getShieldedInstanceConfig().equals(other.getShieldedInstanceConfig())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -967,6 +1394,8 @@ public int hashCode() { hash = (53 * hash) + getSubnetworkUri().hashCode(); hash = (37 * hash) + INTERNAL_IP_ONLY_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getInternalIpOnly()); + hash = (37 * hash) + PRIVATE_IPV6_GOOGLE_ACCESS_FIELD_NUMBER; + hash = (53 * hash) + privateIpv6GoogleAccess_; hash = (37 * hash) + SERVICE_ACCOUNT_FIELD_NUMBER; hash = (53 * hash) + getServiceAccount().hashCode(); if (getServiceAccountScopesCount() > 0) { @@ -985,6 +1414,14 @@ public int hashCode() { hash = (37 * hash) + RESERVATION_AFFINITY_FIELD_NUMBER; hash = (53 * hash) + getReservationAffinity().hashCode(); } + if (hasNodeGroupAffinity()) { + hash = (37 * hash) + NODE_GROUP_AFFINITY_FIELD_NUMBER; + hash = (53 * hash) + getNodeGroupAffinity().hashCode(); + } + if (hasShieldedInstanceConfig()) { + hash = (37 * hash) + SHIELDED_INSTANCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) 
+ getShieldedInstanceConfig().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -1159,6 +1596,8 @@ public Builder clear() { internalIpOnly_ = false; + privateIpv6GoogleAccess_ = 0; + serviceAccount_ = ""; serviceAccountScopes_ = com.google.protobuf.LazyStringArrayList.EMPTY; @@ -1172,6 +1611,18 @@ public Builder clear() { reservationAffinity_ = null; reservationAffinityBuilder_ = null; } + if (nodeGroupAffinityBuilder_ == null) { + nodeGroupAffinity_ = null; + } else { + nodeGroupAffinity_ = null; + nodeGroupAffinityBuilder_ = null; + } + if (shieldedInstanceConfigBuilder_ == null) { + shieldedInstanceConfig_ = null; + } else { + shieldedInstanceConfig_ = null; + shieldedInstanceConfigBuilder_ = null; + } return this; } @@ -1204,6 +1655,7 @@ public com.google.cloud.dataproc.v1.GceClusterConfig buildPartial() { result.networkUri_ = networkUri_; result.subnetworkUri_ = subnetworkUri_; result.internalIpOnly_ = internalIpOnly_; + result.privateIpv6GoogleAccess_ = privateIpv6GoogleAccess_; result.serviceAccount_ = serviceAccount_; if (((bitField0_ & 0x00000001) != 0)) { serviceAccountScopes_ = serviceAccountScopes_.getUnmodifiableView(); @@ -1222,6 +1674,16 @@ public com.google.cloud.dataproc.v1.GceClusterConfig buildPartial() { } else { result.reservationAffinity_ = reservationAffinityBuilder_.build(); } + if (nodeGroupAffinityBuilder_ == null) { + result.nodeGroupAffinity_ = nodeGroupAffinity_; + } else { + result.nodeGroupAffinity_ = nodeGroupAffinityBuilder_.build(); + } + if (shieldedInstanceConfigBuilder_ == null) { + result.shieldedInstanceConfig_ = shieldedInstanceConfig_; + } else { + result.shieldedInstanceConfig_ = shieldedInstanceConfigBuilder_.build(); + } onBuilt(); return result; } @@ -1286,6 +1748,9 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.GceClusterConfig other) { if (other.getInternalIpOnly() != false) { setInternalIpOnly(other.getInternalIpOnly()); } + if 
(other.privateIpv6GoogleAccess_ != 0) { + setPrivateIpv6GoogleAccessValue(other.getPrivateIpv6GoogleAccessValue()); + } if (!other.getServiceAccount().isEmpty()) { serviceAccount_ = other.serviceAccount_; onChanged(); @@ -1314,6 +1779,12 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.GceClusterConfig other) { if (other.hasReservationAffinity()) { mergeReservationAffinity(other.getReservationAffinity()); } + if (other.hasNodeGroupAffinity()) { + mergeNodeGroupAffinity(other.getNodeGroupAffinity()); + } + if (other.hasShieldedInstanceConfig()) { + mergeShieldedInstanceConfig(other.getShieldedInstanceConfig()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1840,26 +2311,132 @@ public Builder clearInternalIpOnly() { return this; } - private java.lang.Object serviceAccount_ = ""; + private int privateIpv6GoogleAccess_ = 0; /** * * *
-     * Optional. The [Dataproc service
-     * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc)
-     * (also see [VM Data Plane
-     * identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity))
-     * used by Dataproc cluster VM instances to access Google Cloud Platform
-     * services.
-     * If not specified, the
-     * [Compute Engine default service
-     * account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account)
-     * is used.
+     * Optional. The type of IPv6 access for a cluster.
      * 
* - * string service_account = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * .google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess private_ipv6_google_access = 12 [(.google.api.field_behavior) = OPTIONAL]; + * * - * @return The serviceAccount. + * @return The enum numeric value on the wire for privateIpv6GoogleAccess. + */ + @java.lang.Override + public int getPrivateIpv6GoogleAccessValue() { + return privateIpv6GoogleAccess_; + } + /** + * + * + *
+     * Optional. The type of IPv6 access for a cluster.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess private_ipv6_google_access = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for privateIpv6GoogleAccess to set. + * @return This builder for chaining. + */ + public Builder setPrivateIpv6GoogleAccessValue(int value) { + + privateIpv6GoogleAccess_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The type of IPv6 access for a cluster.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess private_ipv6_google_access = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The privateIpv6GoogleAccess. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess + getPrivateIpv6GoogleAccess() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess result = + com.google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess.valueOf( + privateIpv6GoogleAccess_); + return result == null + ? com.google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess.UNRECOGNIZED + : result; + } + /** + * + * + *
+     * Optional. The type of IPv6 access for a cluster.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess private_ipv6_google_access = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The privateIpv6GoogleAccess to set. + * @return This builder for chaining. + */ + public Builder setPrivateIpv6GoogleAccess( + com.google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess value) { + if (value == null) { + throw new NullPointerException(); + } + + privateIpv6GoogleAccess_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The type of IPv6 access for a cluster.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess private_ipv6_google_access = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearPrivateIpv6GoogleAccess() { + + privateIpv6GoogleAccess_ = 0; + onChanged(); + return this; + } + + private java.lang.Object serviceAccount_ = ""; + /** + * + * + *
+     * Optional. The [Dataproc service
+     * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc)
+     * (also see [VM Data Plane
+     * identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity))
+     * used by Dataproc cluster VM instances to access Google Cloud Platform
+     * services.
+     * If not specified, the
+     * [Compute Engine default service
+     * account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account)
+     * is used.
+     * 
+ * + * string service_account = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The serviceAccount. */ public java.lang.String getServiceAccount() { java.lang.Object ref = serviceAccount_; @@ -2814,6 +3391,428 @@ public Builder clearReservationAffinity() { return reservationAffinityBuilder_; } + private com.google.cloud.dataproc.v1.NodeGroupAffinity nodeGroupAffinity_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.NodeGroupAffinity, + com.google.cloud.dataproc.v1.NodeGroupAffinity.Builder, + com.google.cloud.dataproc.v1.NodeGroupAffinityOrBuilder> + nodeGroupAffinityBuilder_; + /** + * + * + *
+     * Optional. Node Group Affinity for sole-tenant clusters.
+     * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the nodeGroupAffinity field is set. + */ + public boolean hasNodeGroupAffinity() { + return nodeGroupAffinityBuilder_ != null || nodeGroupAffinity_ != null; + } + /** + * + * + *
+     * Optional. Node Group Affinity for sole-tenant clusters.
+     * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The nodeGroupAffinity. + */ + public com.google.cloud.dataproc.v1.NodeGroupAffinity getNodeGroupAffinity() { + if (nodeGroupAffinityBuilder_ == null) { + return nodeGroupAffinity_ == null + ? com.google.cloud.dataproc.v1.NodeGroupAffinity.getDefaultInstance() + : nodeGroupAffinity_; + } else { + return nodeGroupAffinityBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. Node Group Affinity for sole-tenant clusters.
+     * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setNodeGroupAffinity(com.google.cloud.dataproc.v1.NodeGroupAffinity value) { + if (nodeGroupAffinityBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + nodeGroupAffinity_ = value; + onChanged(); + } else { + nodeGroupAffinityBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Node Group Affinity for sole-tenant clusters.
+     * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setNodeGroupAffinity( + com.google.cloud.dataproc.v1.NodeGroupAffinity.Builder builderForValue) { + if (nodeGroupAffinityBuilder_ == null) { + nodeGroupAffinity_ = builderForValue.build(); + onChanged(); + } else { + nodeGroupAffinityBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. Node Group Affinity for sole-tenant clusters.
+     * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeNodeGroupAffinity(com.google.cloud.dataproc.v1.NodeGroupAffinity value) { + if (nodeGroupAffinityBuilder_ == null) { + if (nodeGroupAffinity_ != null) { + nodeGroupAffinity_ = + com.google.cloud.dataproc.v1.NodeGroupAffinity.newBuilder(nodeGroupAffinity_) + .mergeFrom(value) + .buildPartial(); + } else { + nodeGroupAffinity_ = value; + } + onChanged(); + } else { + nodeGroupAffinityBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Node Group Affinity for sole-tenant clusters.
+     * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearNodeGroupAffinity() { + if (nodeGroupAffinityBuilder_ == null) { + nodeGroupAffinity_ = null; + onChanged(); + } else { + nodeGroupAffinity_ = null; + nodeGroupAffinityBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. Node Group Affinity for sole-tenant clusters.
+     * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.NodeGroupAffinity.Builder getNodeGroupAffinityBuilder() { + + onChanged(); + return getNodeGroupAffinityFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Node Group Affinity for sole-tenant clusters.
+     * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.NodeGroupAffinityOrBuilder getNodeGroupAffinityOrBuilder() { + if (nodeGroupAffinityBuilder_ != null) { + return nodeGroupAffinityBuilder_.getMessageOrBuilder(); + } else { + return nodeGroupAffinity_ == null + ? com.google.cloud.dataproc.v1.NodeGroupAffinity.getDefaultInstance() + : nodeGroupAffinity_; + } + } + /** + * + * + *
+     * Optional. Node Group Affinity for sole-tenant clusters.
+     * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.NodeGroupAffinity, + com.google.cloud.dataproc.v1.NodeGroupAffinity.Builder, + com.google.cloud.dataproc.v1.NodeGroupAffinityOrBuilder> + getNodeGroupAffinityFieldBuilder() { + if (nodeGroupAffinityBuilder_ == null) { + nodeGroupAffinityBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.NodeGroupAffinity, + com.google.cloud.dataproc.v1.NodeGroupAffinity.Builder, + com.google.cloud.dataproc.v1.NodeGroupAffinityOrBuilder>( + getNodeGroupAffinity(), getParentForChildren(), isClean()); + nodeGroupAffinity_ = null; + } + return nodeGroupAffinityBuilder_; + } + + private com.google.cloud.dataproc.v1.ShieldedInstanceConfig shieldedInstanceConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.ShieldedInstanceConfig, + com.google.cloud.dataproc.v1.ShieldedInstanceConfig.Builder, + com.google.cloud.dataproc.v1.ShieldedInstanceConfigOrBuilder> + shieldedInstanceConfigBuilder_; + /** + * + * + *
+     * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+     * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+     * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the shieldedInstanceConfig field is set. + */ + public boolean hasShieldedInstanceConfig() { + return shieldedInstanceConfigBuilder_ != null || shieldedInstanceConfig_ != null; + } + /** + * + * + *
+     * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+     * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+     * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The shieldedInstanceConfig. + */ + public com.google.cloud.dataproc.v1.ShieldedInstanceConfig getShieldedInstanceConfig() { + if (shieldedInstanceConfigBuilder_ == null) { + return shieldedInstanceConfig_ == null + ? com.google.cloud.dataproc.v1.ShieldedInstanceConfig.getDefaultInstance() + : shieldedInstanceConfig_; + } else { + return shieldedInstanceConfigBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+     * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+     * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setShieldedInstanceConfig( + com.google.cloud.dataproc.v1.ShieldedInstanceConfig value) { + if (shieldedInstanceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + shieldedInstanceConfig_ = value; + onChanged(); + } else { + shieldedInstanceConfigBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+     * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+     * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setShieldedInstanceConfig( + com.google.cloud.dataproc.v1.ShieldedInstanceConfig.Builder builderForValue) { + if (shieldedInstanceConfigBuilder_ == null) { + shieldedInstanceConfig_ = builderForValue.build(); + onChanged(); + } else { + shieldedInstanceConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+     * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+     * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeShieldedInstanceConfig( + com.google.cloud.dataproc.v1.ShieldedInstanceConfig value) { + if (shieldedInstanceConfigBuilder_ == null) { + if (shieldedInstanceConfig_ != null) { + shieldedInstanceConfig_ = + com.google.cloud.dataproc.v1.ShieldedInstanceConfig.newBuilder( + shieldedInstanceConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + shieldedInstanceConfig_ = value; + } + onChanged(); + } else { + shieldedInstanceConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+     * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+     * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearShieldedInstanceConfig() { + if (shieldedInstanceConfigBuilder_ == null) { + shieldedInstanceConfig_ = null; + onChanged(); + } else { + shieldedInstanceConfig_ = null; + shieldedInstanceConfigBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+     * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+     * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.ShieldedInstanceConfig.Builder + getShieldedInstanceConfigBuilder() { + + onChanged(); + return getShieldedInstanceConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+     * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+     * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.ShieldedInstanceConfigOrBuilder + getShieldedInstanceConfigOrBuilder() { + if (shieldedInstanceConfigBuilder_ != null) { + return shieldedInstanceConfigBuilder_.getMessageOrBuilder(); + } else { + return shieldedInstanceConfig_ == null + ? com.google.cloud.dataproc.v1.ShieldedInstanceConfig.getDefaultInstance() + : shieldedInstanceConfig_; + } + } + /** + * + * + *
+     * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+     * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+     * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.ShieldedInstanceConfig, + com.google.cloud.dataproc.v1.ShieldedInstanceConfig.Builder, + com.google.cloud.dataproc.v1.ShieldedInstanceConfigOrBuilder> + getShieldedInstanceConfigFieldBuilder() { + if (shieldedInstanceConfigBuilder_ == null) { + shieldedInstanceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.ShieldedInstanceConfig, + com.google.cloud.dataproc.v1.ShieldedInstanceConfig.Builder, + com.google.cloud.dataproc.v1.ShieldedInstanceConfigOrBuilder>( + getShieldedInstanceConfig(), getParentForChildren(), isClean()); + shieldedInstanceConfig_ = null; + } + return shieldedInstanceConfigBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java index 047c0eab..2ab99951 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java @@ -160,6 +160,36 @@ public interface GceClusterConfigOrBuilder */ boolean getInternalIpOnly(); + /** + * + * + *
+   * Optional. The type of IPv6 access for a cluster.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess private_ipv6_google_access = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for privateIpv6GoogleAccess. + */ + int getPrivateIpv6GoogleAccessValue(); + /** + * + * + *
+   * Optional. The type of IPv6 access for a cluster.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess private_ipv6_google_access = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The privateIpv6GoogleAccess. + */ + com.google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess + getPrivateIpv6GoogleAccess(); + /** * * @@ -457,4 +487,89 @@ public interface GceClusterConfigOrBuilder * */ com.google.cloud.dataproc.v1.ReservationAffinityOrBuilder getReservationAffinityOrBuilder(); + + /** + * + * + *
+   * Optional. Node Group Affinity for sole-tenant clusters.
+   * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the nodeGroupAffinity field is set. + */ + boolean hasNodeGroupAffinity(); + /** + * + * + *
+   * Optional. Node Group Affinity for sole-tenant clusters.
+   * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The nodeGroupAffinity. + */ + com.google.cloud.dataproc.v1.NodeGroupAffinity getNodeGroupAffinity(); + /** + * + * + *
+   * Optional. Node Group Affinity for sole-tenant clusters.
+   * 
+ * + * + * .google.cloud.dataproc.v1.NodeGroupAffinity node_group_affinity = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.NodeGroupAffinityOrBuilder getNodeGroupAffinityOrBuilder(); + + /** + * + * + *
+   * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+   * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+   * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the shieldedInstanceConfig field is set. + */ + boolean hasShieldedInstanceConfig(); + /** + * + * + *
+   * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+   * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+   * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The shieldedInstanceConfig. + */ + com.google.cloud.dataproc.v1.ShieldedInstanceConfig getShieldedInstanceConfig(); + /** + * + * + *
+   * Optional. Shielded Instance Config for clusters using [Compute Engine Shielded
+   * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+   * 
+ * + * + * .google.cloud.dataproc.v1.ShieldedInstanceConfig shielded_instance_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.ShieldedInstanceConfigOrBuilder getShieldedInstanceConfigOrBuilder(); } diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfig.java new file mode 100644 index 00000000..dba308fa --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfig.java @@ -0,0 +1,1690 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * The GKE config for this cluster.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.GkeClusterConfig} + */ +public final class GkeClusterConfig extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.GkeClusterConfig) + GkeClusterConfigOrBuilder { + private static final long serialVersionUID = 0L; + // Use GkeClusterConfig.newBuilder() to construct. + private GkeClusterConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private GkeClusterConfig() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new GkeClusterConfig(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private GkeClusterConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder + subBuilder = null; + if (namespacedGkeDeploymentTarget_ != null) { + subBuilder = namespacedGkeDeploymentTarget_.toBuilder(); + } + namespacedGkeDeploymentTarget_ = + input.readMessage( + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + .parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(namespacedGkeDeploymentTarget_); + namespacedGkeDeploymentTarget_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, 
unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.GkeClusterConfig.class, + com.google.cloud.dataproc.v1.GkeClusterConfig.Builder.class); + } + + public interface NamespacedGkeDeploymentTargetOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. The target GKE cluster to deploy to.
+     * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+     * 
+ * + * + * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The targetGkeCluster. + */ + java.lang.String getTargetGkeCluster(); + /** + * + * + *
+     * Optional. The target GKE cluster to deploy to.
+     * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+     * 
+ * + * + * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for targetGkeCluster. + */ + com.google.protobuf.ByteString getTargetGkeClusterBytes(); + + /** + * + * + *
+     * Optional. A namespace within the GKE cluster to deploy into.
+     * 
+ * + * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The clusterNamespace. + */ + java.lang.String getClusterNamespace(); + /** + * + * + *
+     * Optional. A namespace within the GKE cluster to deploy into.
+     * 
+ * + * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for clusterNamespace. + */ + com.google.protobuf.ByteString getClusterNamespaceBytes(); + } + /** + * + * + *
+   * A full, namespace-isolated deployment target for an existing GKE cluster.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget} + */ + public static final class NamespacedGkeDeploymentTarget + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget) + NamespacedGkeDeploymentTargetOrBuilder { + private static final long serialVersionUID = 0L; + // Use NamespacedGkeDeploymentTarget.newBuilder() to construct. + private NamespacedGkeDeploymentTarget( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private NamespacedGkeDeploymentTarget() { + targetGkeCluster_ = ""; + clusterNamespace_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new NamespacedGkeDeploymentTarget(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private NamespacedGkeDeploymentTarget( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + targetGkeCluster_ = s; + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + + clusterNamespace_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.class, + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder + .class); + } + + public static final int TARGET_GKE_CLUSTER_FIELD_NUMBER = 1; + private volatile java.lang.Object targetGkeCluster_; + /** + * + * + *
+     * Optional. The target GKE cluster to deploy to.
+     * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+     * 
+ * + * + * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The targetGkeCluster. + */ + @java.lang.Override + public java.lang.String getTargetGkeCluster() { + java.lang.Object ref = targetGkeCluster_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + targetGkeCluster_ = s; + return s; + } + } + /** + * + * + *
+     * Optional. The target GKE cluster to deploy to.
+     * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+     * 
+ * + * + * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for targetGkeCluster. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTargetGkeClusterBytes() { + java.lang.Object ref = targetGkeCluster_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + targetGkeCluster_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_NAMESPACE_FIELD_NUMBER = 2; + private volatile java.lang.Object clusterNamespace_; + /** + * + * + *
+     * Optional. A namespace within the GKE cluster to deploy into.
+     * 
+ * + * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The clusterNamespace. + */ + @java.lang.Override + public java.lang.String getClusterNamespace() { + java.lang.Object ref = clusterNamespace_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterNamespace_ = s; + return s; + } + } + /** + * + * + *
+     * Optional. A namespace within the GKE cluster to deploy into.
+     * 
+ * + * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for clusterNamespace. + */ + @java.lang.Override + public com.google.protobuf.ByteString getClusterNamespaceBytes() { + java.lang.Object ref = clusterNamespace_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + clusterNamespace_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getTargetGkeClusterBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, targetGkeCluster_); + } + if (!getClusterNamespaceBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, clusterNamespace_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getTargetGkeClusterBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, targetGkeCluster_); + } + if (!getClusterNamespaceBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, clusterNamespace_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget)) { + return super.equals(obj); + } + 
com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget other = + (com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget) obj; + + if (!getTargetGkeCluster().equals(other.getTargetGkeCluster())) return false; + if (!getClusterNamespace().equals(other.getClusterNamespace())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TARGET_GKE_CLUSTER_FIELD_NUMBER; + hash = (53 * hash) + getTargetGkeCluster().hashCode(); + hash = (37 * hash) + CLUSTER_NAMESPACE_FIELD_NUMBER; + hash = (53 * hash) + getClusterNamespace().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * A full, namespace-isolated deployment target for an existing GKE cluster.
+     * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget) + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTargetOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.class, + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder + .class); + } + + // Construct using + // com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + targetGkeCluster_ = ""; + + clusterNamespace_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.ClustersProto + 
.internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget build() { + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + buildPartial() { + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget result = + new com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget(this); + result.targetGkeCluster_ = targetGkeCluster_; + result.clusterNamespace_ = clusterNamespace_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor 
field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget) { + return mergeFrom( + (com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget other) { + if (other + == com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + .getDefaultInstance()) return this; + if (!other.getTargetGkeCluster().isEmpty()) { + targetGkeCluster_ = other.targetGkeCluster_; + onChanged(); + } + if (!other.getClusterNamespace().isEmpty()) { + clusterNamespace_ = other.clusterNamespace_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget parsedMessage = + null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object targetGkeCluster_ = ""; + /** + * + * + *
+       * Optional. The target GKE cluster to deploy to.
+       * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+       * 
+ * + * + * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The targetGkeCluster. + */ + public java.lang.String getTargetGkeCluster() { + java.lang.Object ref = targetGkeCluster_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + targetGkeCluster_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+       * Optional. The target GKE cluster to deploy to.
+       * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+       * 
+ * + * + * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for targetGkeCluster. + */ + public com.google.protobuf.ByteString getTargetGkeClusterBytes() { + java.lang.Object ref = targetGkeCluster_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + targetGkeCluster_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * Optional. The target GKE cluster to deploy to.
+       * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+       * 
+ * + * + * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The targetGkeCluster to set. + * @return This builder for chaining. + */ + public Builder setTargetGkeCluster(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + targetGkeCluster_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Optional. The target GKE cluster to deploy to.
+       * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+       * 
+ * + * + * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearTargetGkeCluster() { + + targetGkeCluster_ = getDefaultInstance().getTargetGkeCluster(); + onChanged(); + return this; + } + /** + * + * + *
+       * Optional. The target GKE cluster to deploy to.
+       * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+       * 
+ * + * + * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for targetGkeCluster to set. + * @return This builder for chaining. + */ + public Builder setTargetGkeClusterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + targetGkeCluster_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterNamespace_ = ""; + /** + * + * + *
+       * Optional. A namespace within the GKE cluster to deploy into.
+       * 
+ * + * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The clusterNamespace. + */ + public java.lang.String getClusterNamespace() { + java.lang.Object ref = clusterNamespace_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterNamespace_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+       * Optional. A namespace within the GKE cluster to deploy into.
+       * 
+ * + * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for clusterNamespace. + */ + public com.google.protobuf.ByteString getClusterNamespaceBytes() { + java.lang.Object ref = clusterNamespace_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + clusterNamespace_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * Optional. A namespace within the GKE cluster to deploy into.
+       * 
+ * + * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The clusterNamespace to set. + * @return This builder for chaining. + */ + public Builder setClusterNamespace(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterNamespace_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Optional. A namespace within the GKE cluster to deploy into.
+       * 
+ * + * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearClusterNamespace() { + + clusterNamespace_ = getDefaultInstance().getClusterNamespace(); + onChanged(); + return this; + } + /** + * + * + *
+       * Optional. A namespace within the GKE cluster to deploy into.
+       * 
+ * + * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for clusterNamespace to set. + * @return This builder for chaining. + */ + public Builder setClusterNamespaceBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterNamespace_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget) + private static final com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget(); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public NamespacedGkeDeploymentTarget parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new NamespacedGkeDeploymentTarget(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public static final int NAMESPACED_GKE_DEPLOYMENT_TARGET_FIELD_NUMBER = 1; + private com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + namespacedGkeDeploymentTarget_; + /** + * + * + *
+   * Optional. A target for the deployment.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the namespacedGkeDeploymentTarget field is set. + */ + @java.lang.Override + public boolean hasNamespacedGkeDeploymentTarget() { + return namespacedGkeDeploymentTarget_ != null; + } + /** + * + * + *
+   * Optional. A target for the deployment.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The namespacedGkeDeploymentTarget. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + getNamespacedGkeDeploymentTarget() { + return namespacedGkeDeploymentTarget_ == null + ? com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + .getDefaultInstance() + : namespacedGkeDeploymentTarget_; + } + /** + * + * + *
+   * Optional. A target for the deployment.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTargetOrBuilder + getNamespacedGkeDeploymentTargetOrBuilder() { + return getNamespacedGkeDeploymentTarget(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (namespacedGkeDeploymentTarget_ != null) { + output.writeMessage(1, getNamespacedGkeDeploymentTarget()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (namespacedGkeDeploymentTarget_ != null) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, getNamespacedGkeDeploymentTarget()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.GkeClusterConfig)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.GkeClusterConfig other = + (com.google.cloud.dataproc.v1.GkeClusterConfig) obj; + + if (hasNamespacedGkeDeploymentTarget() != other.hasNamespacedGkeDeploymentTarget()) + return false; + if (hasNamespacedGkeDeploymentTarget()) { + if (!getNamespacedGkeDeploymentTarget().equals(other.getNamespacedGkeDeploymentTarget())) + return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; 
+ } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasNamespacedGkeDeploymentTarget()) { + hash = (37 * hash) + NAMESPACED_GKE_DEPLOYMENT_TARGET_FIELD_NUMBER; + hash = (53 * hash) + getNamespacedGkeDeploymentTarget().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig 
parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.GkeClusterConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == 
DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * The GKE config for this cluster.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.GkeClusterConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.GkeClusterConfig) + com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.GkeClusterConfig.class, + com.google.cloud.dataproc.v1.GkeClusterConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.GkeClusterConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (namespacedGkeDeploymentTargetBuilder_ == null) { + namespacedGkeDeploymentTarget_ = null; + } else { + namespacedGkeDeploymentTarget_ = null; + namespacedGkeDeploymentTargetBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.GkeClusterConfig 
getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.GkeClusterConfig build() { + com.google.cloud.dataproc.v1.GkeClusterConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.GkeClusterConfig buildPartial() { + com.google.cloud.dataproc.v1.GkeClusterConfig result = + new com.google.cloud.dataproc.v1.GkeClusterConfig(this); + if (namespacedGkeDeploymentTargetBuilder_ == null) { + result.namespacedGkeDeploymentTarget_ = namespacedGkeDeploymentTarget_; + } else { + result.namespacedGkeDeploymentTarget_ = namespacedGkeDeploymentTargetBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.GkeClusterConfig) { + return 
mergeFrom((com.google.cloud.dataproc.v1.GkeClusterConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.GkeClusterConfig other) { + if (other == com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance()) return this; + if (other.hasNamespacedGkeDeploymentTarget()) { + mergeNamespacedGkeDeploymentTarget(other.getNamespacedGkeDeploymentTarget()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.GkeClusterConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.GkeClusterConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + namespacedGkeDeploymentTarget_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget, + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder, + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTargetOrBuilder> + namespacedGkeDeploymentTargetBuilder_; + /** + * + * + *
+     * Optional. A target for the deployment.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the namespacedGkeDeploymentTarget field is set. + */ + public boolean hasNamespacedGkeDeploymentTarget() { + return namespacedGkeDeploymentTargetBuilder_ != null + || namespacedGkeDeploymentTarget_ != null; + } + /** + * + * + *
+     * Optional. A target for the deployment.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The namespacedGkeDeploymentTarget. + */ + public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + getNamespacedGkeDeploymentTarget() { + if (namespacedGkeDeploymentTargetBuilder_ == null) { + return namespacedGkeDeploymentTarget_ == null + ? com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + .getDefaultInstance() + : namespacedGkeDeploymentTarget_; + } else { + return namespacedGkeDeploymentTargetBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. A target for the deployment.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setNamespacedGkeDeploymentTarget( + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget value) { + if (namespacedGkeDeploymentTargetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + namespacedGkeDeploymentTarget_ = value; + onChanged(); + } else { + namespacedGkeDeploymentTargetBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. A target for the deployment.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setNamespacedGkeDeploymentTarget( + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder + builderForValue) { + if (namespacedGkeDeploymentTargetBuilder_ == null) { + namespacedGkeDeploymentTarget_ = builderForValue.build(); + onChanged(); + } else { + namespacedGkeDeploymentTargetBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. A target for the deployment.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeNamespacedGkeDeploymentTarget( + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget value) { + if (namespacedGkeDeploymentTargetBuilder_ == null) { + if (namespacedGkeDeploymentTarget_ != null) { + namespacedGkeDeploymentTarget_ = + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + .newBuilder(namespacedGkeDeploymentTarget_) + .mergeFrom(value) + .buildPartial(); + } else { + namespacedGkeDeploymentTarget_ = value; + } + onChanged(); + } else { + namespacedGkeDeploymentTargetBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. A target for the deployment.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearNamespacedGkeDeploymentTarget() { + if (namespacedGkeDeploymentTargetBuilder_ == null) { + namespacedGkeDeploymentTarget_ = null; + onChanged(); + } else { + namespacedGkeDeploymentTarget_ = null; + namespacedGkeDeploymentTargetBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. A target for the deployment.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder + getNamespacedGkeDeploymentTargetBuilder() { + + onChanged(); + return getNamespacedGkeDeploymentTargetFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. A target for the deployment.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTargetOrBuilder + getNamespacedGkeDeploymentTargetOrBuilder() { + if (namespacedGkeDeploymentTargetBuilder_ != null) { + return namespacedGkeDeploymentTargetBuilder_.getMessageOrBuilder(); + } else { + return namespacedGkeDeploymentTarget_ == null + ? com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + .getDefaultInstance() + : namespacedGkeDeploymentTarget_; + } + } + /** + * + * + *
+     * Optional. A target for the deployment.
+     * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget, + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder, + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTargetOrBuilder> + getNamespacedGkeDeploymentTargetFieldBuilder() { + if (namespacedGkeDeploymentTargetBuilder_ == null) { + namespacedGkeDeploymentTargetBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget, + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder, + com.google.cloud.dataproc.v1.GkeClusterConfig + .NamespacedGkeDeploymentTargetOrBuilder>( + getNamespacedGkeDeploymentTarget(), getParentForChildren(), isClean()); + namespacedGkeDeploymentTarget_ = null; + } + return namespacedGkeDeploymentTargetBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.GkeClusterConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GkeClusterConfig) + private static final com.google.cloud.dataproc.v1.GkeClusterConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.GkeClusterConfig(); + } + + public static com.google.cloud.dataproc.v1.GkeClusterConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = 
+ new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GkeClusterConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GkeClusterConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.GkeClusterConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfigOrBuilder.java new file mode 100644 index 00000000..994f7dbc --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfigOrBuilder.java @@ -0,0 +1,68 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +public interface GkeClusterConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.GkeClusterConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. A target for the deployment.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the namespacedGkeDeploymentTarget field is set. + */ + boolean hasNamespacedGkeDeploymentTarget(); + /** + * + * + *
+   * Optional. A target for the deployment.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The namespacedGkeDeploymentTarget. + */ + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget + getNamespacedGkeDeploymentTarget(); + /** + * + * + *
+   * Optional. A target for the deployment.
+   * 
+ * + * + * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTargetOrBuilder + getNamespacedGkeDeploymentTargetOrBuilder(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/IdentityConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/IdentityConfig.java new file mode 100644 index 00000000..ead588db --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/IdentityConfig.java @@ -0,0 +1,829 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * Identity related configuration, including service account based
+ * secure multi-tenancy user mappings.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.IdentityConfig} + */ +public final class IdentityConfig extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.IdentityConfig) + IdentityConfigOrBuilder { + private static final long serialVersionUID = 0L; + // Use IdentityConfig.newBuilder() to construct. + private IdentityConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private IdentityConfig() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new IdentityConfig(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private IdentityConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + userServiceAccountMapping_ = + com.google.protobuf.MapField.newMapField( + UserServiceAccountMappingDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000001; + } + com.google.protobuf.MapEntry + userServiceAccountMapping__ = + input.readMessage( + UserServiceAccountMappingDefaultEntryHolder.defaultEntry + .getParserForType(), + extensionRegistry); + userServiceAccountMapping_ + .getMutableMap() + .put( + userServiceAccountMapping__.getKey(), userServiceAccountMapping__.getValue()); + break; + } + default: + { + if 
(!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_IdentityConfig_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField(int number) { + switch (number) { + case 1: + return internalGetUserServiceAccountMapping(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_IdentityConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.IdentityConfig.class, + com.google.cloud.dataproc.v1.IdentityConfig.Builder.class); + } + + public static final int USER_SERVICE_ACCOUNT_MAPPING_FIELD_NUMBER = 1; + + private static final class UserServiceAccountMappingDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_IdentityConfig_UserServiceAccountMappingEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + private com.google.protobuf.MapField + userServiceAccountMapping_; + + private 
com.google.protobuf.MapField + internalGetUserServiceAccountMapping() { + if (userServiceAccountMapping_ == null) { + return com.google.protobuf.MapField.emptyMapField( + UserServiceAccountMappingDefaultEntryHolder.defaultEntry); + } + return userServiceAccountMapping_; + } + + public int getUserServiceAccountMappingCount() { + return internalGetUserServiceAccountMapping().getMap().size(); + } + /** + * + * + *
+   * Required. Map of user to service account.
+   * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public boolean containsUserServiceAccountMapping(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + return internalGetUserServiceAccountMapping().getMap().containsKey(key); + } + /** Use {@link #getUserServiceAccountMappingMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getUserServiceAccountMapping() { + return getUserServiceAccountMappingMap(); + } + /** + * + * + *
+   * Required. Map of user to service account.
+   * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.Map getUserServiceAccountMappingMap() { + return internalGetUserServiceAccountMapping().getMap(); + } + /** + * + * + *
+   * Required. Map of user to service account.
+   * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.lang.String getUserServiceAccountMappingOrDefault( + java.lang.String key, java.lang.String defaultValue) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = + internalGetUserServiceAccountMapping().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+   * Required. Map of user to service account.
+   * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.lang.String getUserServiceAccountMappingOrThrow(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = + internalGetUserServiceAccountMapping().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + com.google.protobuf.GeneratedMessageV3.serializeStringMapTo( + output, + internalGetUserServiceAccountMapping(), + UserServiceAccountMappingDefaultEntryHolder.defaultEntry, + 1); + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (java.util.Map.Entry entry : + internalGetUserServiceAccountMapping().getMap().entrySet()) { + com.google.protobuf.MapEntry userServiceAccountMapping__ = + UserServiceAccountMappingDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(1, userServiceAccountMapping__); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.IdentityConfig)) { + return super.equals(obj); + } + 
com.google.cloud.dataproc.v1.IdentityConfig other = + (com.google.cloud.dataproc.v1.IdentityConfig) obj; + + if (!internalGetUserServiceAccountMapping() + .equals(other.internalGetUserServiceAccountMapping())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (!internalGetUserServiceAccountMapping().getMap().isEmpty()) { + hash = (37 * hash) + USER_SERVICE_ACCOUNT_MAPPING_FIELD_NUMBER; + hash = (53 * hash) + internalGetUserServiceAccountMapping().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.IdentityConfig parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.IdentityConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.IdentityConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.IdentityConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.IdentityConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.dataproc.v1.IdentityConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.IdentityConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.IdentityConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.IdentityConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.IdentityConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.IdentityConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.IdentityConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public 
static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.IdentityConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Identity related configuration, including service account based
+   * secure multi-tenancy user mappings.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.IdentityConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.IdentityConfig) + com.google.cloud.dataproc.v1.IdentityConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_IdentityConfig_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField(int number) { + switch (number) { + case 1: + return internalGetUserServiceAccountMapping(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField(int number) { + switch (number) { + case 1: + return internalGetMutableUserServiceAccountMapping(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_IdentityConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.IdentityConfig.class, + com.google.cloud.dataproc.v1.IdentityConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.IdentityConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + 
super.clear(); + internalGetMutableUserServiceAccountMapping().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_IdentityConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.IdentityConfig getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.IdentityConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.IdentityConfig build() { + com.google.cloud.dataproc.v1.IdentityConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.IdentityConfig buildPartial() { + com.google.cloud.dataproc.v1.IdentityConfig result = + new com.google.cloud.dataproc.v1.IdentityConfig(this); + int from_bitField0_ = bitField0_; + result.userServiceAccountMapping_ = internalGetUserServiceAccountMapping(); + result.userServiceAccountMapping_.makeImmutable(); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.IdentityConfig) { + return mergeFrom((com.google.cloud.dataproc.v1.IdentityConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.IdentityConfig other) { + if (other == com.google.cloud.dataproc.v1.IdentityConfig.getDefaultInstance()) return this; + internalGetMutableUserServiceAccountMapping() + .mergeFrom(other.internalGetUserServiceAccountMapping()); + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.IdentityConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.IdentityConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private com.google.protobuf.MapField + userServiceAccountMapping_; + + private com.google.protobuf.MapField + internalGetUserServiceAccountMapping() { + if (userServiceAccountMapping_ == null) { + return com.google.protobuf.MapField.emptyMapField( + UserServiceAccountMappingDefaultEntryHolder.defaultEntry); + } + return userServiceAccountMapping_; + } + + private com.google.protobuf.MapField + internalGetMutableUserServiceAccountMapping() { + onChanged(); + ; + if 
(userServiceAccountMapping_ == null) { + userServiceAccountMapping_ = + com.google.protobuf.MapField.newMapField( + UserServiceAccountMappingDefaultEntryHolder.defaultEntry); + } + if (!userServiceAccountMapping_.isMutable()) { + userServiceAccountMapping_ = userServiceAccountMapping_.copy(); + } + return userServiceAccountMapping_; + } + + public int getUserServiceAccountMappingCount() { + return internalGetUserServiceAccountMapping().getMap().size(); + } + /** + * + * + *
+     * Required. Map of user to service account.
+     * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public boolean containsUserServiceAccountMapping(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + return internalGetUserServiceAccountMapping().getMap().containsKey(key); + } + /** Use {@link #getUserServiceAccountMappingMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getUserServiceAccountMapping() { + return getUserServiceAccountMappingMap(); + } + /** + * + * + *
+     * Required. Map of user to service account.
+     * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.Map getUserServiceAccountMappingMap() { + return internalGetUserServiceAccountMapping().getMap(); + } + /** + * + * + *
+     * Required. Map of user to service account.
+     * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.lang.String getUserServiceAccountMappingOrDefault( + java.lang.String key, java.lang.String defaultValue) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = + internalGetUserServiceAccountMapping().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+     * Required. Map of user to service account.
+     * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.lang.String getUserServiceAccountMappingOrThrow(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = + internalGetUserServiceAccountMapping().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearUserServiceAccountMapping() { + internalGetMutableUserServiceAccountMapping().getMutableMap().clear(); + return this; + } + /** + * + * + *
+     * Required. Map of user to service account.
+     * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeUserServiceAccountMapping(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + internalGetMutableUserServiceAccountMapping().getMutableMap().remove(key); + return this; + } + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableUserServiceAccountMapping() { + return internalGetMutableUserServiceAccountMapping().getMutableMap(); + } + /** + * + * + *
+     * Required. Map of user to service account.
+     * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder putUserServiceAccountMapping(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + if (value == null) { + throw new java.lang.NullPointerException(); + } + internalGetMutableUserServiceAccountMapping().getMutableMap().put(key, value); + return this; + } + /** + * + * + *
+     * Required. Map of user to service account.
+     * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder putAllUserServiceAccountMapping( + java.util.Map values) { + internalGetMutableUserServiceAccountMapping().getMutableMap().putAll(values); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.IdentityConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.IdentityConfig) + private static final com.google.cloud.dataproc.v1.IdentityConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.IdentityConfig(); + } + + public static com.google.cloud.dataproc.v1.IdentityConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public IdentityConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new IdentityConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.IdentityConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/IdentityConfigOrBuilder.java 
b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/IdentityConfigOrBuilder.java new file mode 100644 index 00000000..8e42dfec --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/IdentityConfigOrBuilder.java @@ -0,0 +1,90 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +public interface IdentityConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.IdentityConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Map of user to service account.
+   * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getUserServiceAccountMappingCount(); + /** + * + * + *
+   * Required. Map of user to service account.
+   * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + boolean containsUserServiceAccountMapping(java.lang.String key); + /** Use {@link #getUserServiceAccountMappingMap()} instead. */ + @java.lang.Deprecated + java.util.Map getUserServiceAccountMapping(); + /** + * + * + *
+   * Required. Map of user to service account.
+   * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.Map getUserServiceAccountMappingMap(); + /** + * + * + *
+   * Required. Map of user to service account.
+   * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.lang.String getUserServiceAccountMappingOrDefault( + java.lang.String key, java.lang.String defaultValue); + /** + * + * + *
+   * Required. Map of user to service account.
+   * 
+ * + * + * map<string, string> user_service_account_mapping = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.lang.String getUserServiceAccountMappingOrThrow(java.lang.String key); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java index 9293c072..db0e129d 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java @@ -384,7 +384,11 @@ private Preemptibility(int value) { * *
    * Optional. The number of VM instances in the instance group.
-   * For master instance groups, must be set to 1.
+   * For [HA
+   * cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)
+   * [master_config](#FIELDS.master_config) groups, **must be set to 3**.
+   * For standard cluster [master_config](#FIELDS.master_config) groups,
+   * **must be set to 1**.
    * 
* * int32 num_instances = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -1474,7 +1478,11 @@ public Builder mergeFrom( * *
      * Optional. The number of VM instances in the instance group.
-     * For master instance groups, must be set to 1.
+     * For [HA
+     * cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)
+     * [master_config](#FIELDS.master_config) groups, **must be set to 3**.
+     * For standard cluster [master_config](#FIELDS.master_config) groups,
+     * **must be set to 1**.
      * 
* * int32 num_instances = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -1490,7 +1498,11 @@ public int getNumInstances() { * *
      * Optional. The number of VM instances in the instance group.
-     * For master instance groups, must be set to 1.
+     * For [HA
+     * cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)
+     * [master_config](#FIELDS.master_config) groups, **must be set to 3**.
+     * For standard cluster [master_config](#FIELDS.master_config) groups,
+     * **must be set to 1**.
      * 
* * int32 num_instances = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -1509,7 +1521,11 @@ public Builder setNumInstances(int value) { * *
      * Optional. The number of VM instances in the instance group.
-     * For master instance groups, must be set to 1.
+     * For [HA
+     * cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)
+     * [master_config](#FIELDS.master_config) groups, **must be set to 3**.
+     * For standard cluster [master_config](#FIELDS.master_config) groups,
+     * **must be set to 1**.
      * 
* * int32 num_instances = 1 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfigOrBuilder.java index 9709605d..77f65b6d 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfigOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfigOrBuilder.java @@ -28,7 +28,11 @@ public interface InstanceGroupConfigOrBuilder * *
    * Optional. The number of VM instances in the instance group.
-   * For master instance groups, must be set to 1.
+   * For [HA
+   * cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)
+   * [master_config](#FIELDS.master_config) groups, **must be set to 3**.
+   * For standard cluster [master_config](#FIELDS.master_config) groups,
+   * **must be set to 1**.
    * 
* * int32 num_instances = 1 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstantiateWorkflowTemplateRequest.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstantiateWorkflowTemplateRequest.java index 29844db6..edfd0940 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstantiateWorkflowTemplateRequest.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstantiateWorkflowTemplateRequest.java @@ -332,7 +332,7 @@ public int getParametersCount() { * *
    * Optional. Map from parameter names to values that should be used for those
-   * parameters. Values may not exceed 100 characters.
+   * parameters. Values may not exceed 1000 characters.
    * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -356,7 +356,7 @@ public java.util.Map getParameters() { * *
    * Optional. Map from parameter names to values that should be used for those
-   * parameters. Values may not exceed 100 characters.
+   * parameters. Values may not exceed 1000 characters.
    * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -371,7 +371,7 @@ public java.util.Map getParametersMap() { * *
    * Optional. Map from parameter names to values that should be used for those
-   * parameters. Values may not exceed 100 characters.
+   * parameters. Values may not exceed 1000 characters.
    * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -391,7 +391,7 @@ public java.lang.String getParametersOrDefault( * *
    * Optional. Map from parameter names to values that should be used for those
-   * parameters. Values may not exceed 100 characters.
+   * parameters. Values may not exceed 1000 characters.
    * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -1191,7 +1191,7 @@ public int getParametersCount() { * *
      * Optional. Map from parameter names to values that should be used for those
-     * parameters. Values may not exceed 100 characters.
+     * parameters. Values may not exceed 1000 characters.
      * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -1215,7 +1215,7 @@ public java.util.Map getParameters() { * *
      * Optional. Map from parameter names to values that should be used for those
-     * parameters. Values may not exceed 100 characters.
+     * parameters. Values may not exceed 1000 characters.
      * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -1230,7 +1230,7 @@ public java.util.Map getParametersMap() { * *
      * Optional. Map from parameter names to values that should be used for those
-     * parameters. Values may not exceed 100 characters.
+     * parameters. Values may not exceed 1000 characters.
      * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -1250,7 +1250,7 @@ public java.lang.String getParametersOrDefault( * *
      * Optional. Map from parameter names to values that should be used for those
-     * parameters. Values may not exceed 100 characters.
+     * parameters. Values may not exceed 1000 characters.
      * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -1277,7 +1277,7 @@ public Builder clearParameters() { * *
      * Optional. Map from parameter names to values that should be used for those
-     * parameters. Values may not exceed 100 characters.
+     * parameters. Values may not exceed 1000 characters.
      * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -1300,7 +1300,7 @@ public java.util.Map getMutableParameters() * *
      * Optional. Map from parameter names to values that should be used for those
-     * parameters. Values may not exceed 100 characters.
+     * parameters. Values may not exceed 1000 characters.
      * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -1321,7 +1321,7 @@ public Builder putParameters(java.lang.String key, java.lang.String value) { * *
      * Optional. Map from parameter names to values that should be used for those
-     * parameters. Values may not exceed 100 characters.
+     * parameters. Values may not exceed 1000 characters.
      * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstantiateWorkflowTemplateRequestOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstantiateWorkflowTemplateRequestOrBuilder.java index b7c701af..cd7510b5 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstantiateWorkflowTemplateRequestOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstantiateWorkflowTemplateRequestOrBuilder.java @@ -125,7 +125,7 @@ public interface InstantiateWorkflowTemplateRequestOrBuilder * *
    * Optional. Map from parameter names to values that should be used for those
-   * parameters. Values may not exceed 100 characters.
+   * parameters. Values may not exceed 1000 characters.
    * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -137,7 +137,7 @@ public interface InstantiateWorkflowTemplateRequestOrBuilder * *
    * Optional. Map from parameter names to values that should be used for those
-   * parameters. Values may not exceed 100 characters.
+   * parameters. Values may not exceed 1000 characters.
    * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -152,7 +152,7 @@ public interface InstantiateWorkflowTemplateRequestOrBuilder * *
    * Optional. Map from parameter names to values that should be used for those
-   * parameters. Values may not exceed 100 characters.
+   * parameters. Values may not exceed 1000 characters.
    * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -164,7 +164,7 @@ public interface InstantiateWorkflowTemplateRequestOrBuilder * *
    * Optional. Map from parameter names to values that should be used for those
-   * parameters. Values may not exceed 100 characters.
+   * parameters. Values may not exceed 1000 characters.
    * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; @@ -176,7 +176,7 @@ public interface InstantiateWorkflowTemplateRequestOrBuilder * *
    * Optional. Map from parameter names to values that should be used for those
-   * parameters. Values may not exceed 100 characters.
+   * parameters. Values may not exceed 1000 characters.
    * 
* * map<string, string> parameters = 6 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Job.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Job.java index 191e0e09..3450f415 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Job.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Job.java @@ -1582,8 +1582,8 @@ public com.google.protobuf.ByteString getJobUuidBytes() { * * *
-   * Output only. Indicates whether the job is completed. If the value is `false`,
-   * the job is still in progress. If `true`, the job is completed, and
+   * Output only. Indicates whether the job is completed. If the value is
+   * `false`, the job is still in progress. If `true`, the job is completed, and
    * `status.state` field will indicate if it was successful, failed,
    * or cancelled.
    * 
@@ -6513,8 +6513,8 @@ public Builder setJobUuidBytes(com.google.protobuf.ByteString value) { * * *
-     * Output only. Indicates whether the job is completed. If the value is `false`,
-     * the job is still in progress. If `true`, the job is completed, and
+     * Output only. Indicates whether the job is completed. If the value is
+     * `false`, the job is still in progress. If `true`, the job is completed, and
      * `status.state` field will indicate if it was successful, failed,
      * or cancelled.
      * 
@@ -6531,8 +6531,8 @@ public boolean getDone() { * * *
-     * Output only. Indicates whether the job is completed. If the value is `false`,
-     * the job is still in progress. If `true`, the job is completed, and
+     * Output only. Indicates whether the job is completed. If the value is
+     * `false`, the job is still in progress. If `true`, the job is completed, and
      * `status.state` field will indicate if it was successful, failed,
      * or cancelled.
      * 
@@ -6552,8 +6552,8 @@ public Builder setDone(boolean value) { * * *
-     * Output only. Indicates whether the job is completed. If the value is `false`,
-     * the job is still in progress. If `true`, the job is completed, and
+     * Output only. Indicates whether the job is completed. If the value is
+     * `false`, the job is still in progress. If `true`, the job is completed, and
      * `status.state` field will indicate if it was successful, failed,
      * or cancelled.
      * 
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobOrBuilder.java index f70e389f..0bc0e6ee 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobOrBuilder.java @@ -836,8 +836,8 @@ public interface JobOrBuilder * * *
-   * Output only. Indicates whether the job is completed. If the value is `false`,
-   * the job is still in progress. If `true`, the job is completed, and
+   * Output only. Indicates whether the job is completed. If the value is
+   * `false`, the job is still in progress. If `true`, the job is completed, and
    * `status.state` field will indicate if it was successful, failed,
    * or cancelled.
    * 
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobPlacement.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobPlacement.java index 27b3ec8b..97a86edf 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobPlacement.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobPlacement.java @@ -61,6 +61,7 @@ private JobPlacement( if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -85,6 +86,23 @@ private JobPlacement( clusterUuid_ = s; break; } + case 26: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + clusterLabels_ = + com.google.protobuf.MapField.newMapField( + ClusterLabelsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000001; + } + com.google.protobuf.MapEntry clusterLabels__ = + input.readMessage( + ClusterLabelsDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + clusterLabels_ + .getMutableMap() + .put(clusterLabels__.getKey(), clusterLabels__.getValue()); + break; + } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { @@ -109,6 +127,17 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { .internal_static_google_cloud_dataproc_v1_JobPlacement_descriptor; } + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField(int number) { + switch (number) { + case 3: + return internalGetClusterLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { @@ -219,6 +248,115 @@ public com.google.protobuf.ByteString getClusterUuidBytes() { } } 
+ public static final int CLUSTER_LABELS_FIELD_NUMBER = 3; + + private static final class ClusterLabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.dataproc.v1.JobsProto + .internal_static_google_cloud_dataproc_v1_JobPlacement_ClusterLabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + private com.google.protobuf.MapField clusterLabels_; + + private com.google.protobuf.MapField + internalGetClusterLabels() { + if (clusterLabels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ClusterLabelsDefaultEntryHolder.defaultEntry); + } + return clusterLabels_; + } + + public int getClusterLabelsCount() { + return internalGetClusterLabels().getMap().size(); + } + /** + * + * + *
+   * Optional. Cluster labels to identify a cluster where the job will be
+   * submitted.
+   * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsClusterLabels(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + return internalGetClusterLabels().getMap().containsKey(key); + } + /** Use {@link #getClusterLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getClusterLabels() { + return getClusterLabelsMap(); + } + /** + * + * + *
+   * Optional. Cluster labels to identify a cluster where the job will be
+   * submitted.
+   * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getClusterLabelsMap() { + return internalGetClusterLabels().getMap(); + } + /** + * + * + *
+   * Optional. Cluster labels to identify a cluster where the job will be
+   * submitted.
+   * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getClusterLabelsOrDefault( + java.lang.String key, java.lang.String defaultValue) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetClusterLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+   * Optional. Cluster labels to identify a cluster where the job will be
+   * submitted.
+   * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getClusterLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetClusterLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -239,6 +377,8 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (!getClusterUuidBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, clusterUuid_); } + com.google.protobuf.GeneratedMessageV3.serializeStringMapTo( + output, internalGetClusterLabels(), ClusterLabelsDefaultEntryHolder.defaultEntry, 3); unknownFields.writeTo(output); } @@ -254,6 +394,16 @@ public int getSerializedSize() { if (!getClusterUuidBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, clusterUuid_); } + for (java.util.Map.Entry entry : + internalGetClusterLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry clusterLabels__ = + ClusterLabelsDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, clusterLabels__); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -272,6 +422,7 @@ public boolean equals(final java.lang.Object obj) { if (!getClusterName().equals(other.getClusterName())) return false; if (!getClusterUuid().equals(other.getClusterUuid())) return false; + if (!internalGetClusterLabels().equals(other.internalGetClusterLabels())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -287,6 +438,10 @@ public int hashCode() { hash = (53 * hash) + getClusterName().hashCode(); 
hash = (37 * hash) + CLUSTER_UUID_FIELD_NUMBER; hash = (53 * hash) + getClusterUuid().hashCode(); + if (!internalGetClusterLabels().getMap().isEmpty()) { + hash = (37 * hash) + CLUSTER_LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetClusterLabels().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -405,6 +560,26 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { .internal_static_google_cloud_dataproc_v1_JobPlacement_descriptor; } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField(int number) { + switch (number) { + case 3: + return internalGetClusterLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField(int number) { + switch (number) { + case 3: + return internalGetMutableClusterLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { @@ -436,6 +611,7 @@ public Builder clear() { clusterUuid_ = ""; + internalGetMutableClusterLabels().clear(); return this; } @@ -463,8 +639,11 @@ public com.google.cloud.dataproc.v1.JobPlacement build() { public com.google.cloud.dataproc.v1.JobPlacement buildPartial() { com.google.cloud.dataproc.v1.JobPlacement result = new com.google.cloud.dataproc.v1.JobPlacement(this); + int from_bitField0_ = bitField0_; result.clusterName_ = clusterName_; result.clusterUuid_ = clusterUuid_; + result.clusterLabels_ = internalGetClusterLabels(); + result.clusterLabels_.makeImmutable(); onBuilt(); return result; } @@ -522,6 +701,7 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.JobPlacement other) { clusterUuid_ = other.clusterUuid_; onChanged(); } + 
internalGetMutableClusterLabels().mergeFrom(other.internalGetClusterLabels()); this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -551,6 +731,8 @@ public Builder mergeFrom( return this; } + private int bitField0_; + private java.lang.Object clusterName_ = ""; /** * @@ -768,6 +950,180 @@ public Builder setClusterUuidBytes(com.google.protobuf.ByteString value) { return this; } + private com.google.protobuf.MapField clusterLabels_; + + private com.google.protobuf.MapField + internalGetClusterLabels() { + if (clusterLabels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ClusterLabelsDefaultEntryHolder.defaultEntry); + } + return clusterLabels_; + } + + private com.google.protobuf.MapField + internalGetMutableClusterLabels() { + onChanged(); + ; + if (clusterLabels_ == null) { + clusterLabels_ = + com.google.protobuf.MapField.newMapField(ClusterLabelsDefaultEntryHolder.defaultEntry); + } + if (!clusterLabels_.isMutable()) { + clusterLabels_ = clusterLabels_.copy(); + } + return clusterLabels_; + } + + public int getClusterLabelsCount() { + return internalGetClusterLabels().getMap().size(); + } + /** + * + * + *
+     * Optional. Cluster labels to identify a cluster where the job will be
+     * submitted.
+     * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsClusterLabels(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + return internalGetClusterLabels().getMap().containsKey(key); + } + /** Use {@link #getClusterLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getClusterLabels() { + return getClusterLabelsMap(); + } + /** + * + * + *
+     * Optional. Cluster labels to identify a cluster where the job will be
+     * submitted.
+     * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getClusterLabelsMap() { + return internalGetClusterLabels().getMap(); + } + /** + * + * + *
+     * Optional. Cluster labels to identify a cluster where the job will be
+     * submitted.
+     * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getClusterLabelsOrDefault( + java.lang.String key, java.lang.String defaultValue) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetClusterLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+     * Optional. Cluster labels to identify a cluster where the job will be
+     * submitted.
+     * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getClusterLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetClusterLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearClusterLabels() { + internalGetMutableClusterLabels().getMutableMap().clear(); + return this; + } + /** + * + * + *
+     * Optional. Cluster labels to identify a cluster where the job will be
+     * submitted.
+     * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeClusterLabels(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + internalGetMutableClusterLabels().getMutableMap().remove(key); + return this; + } + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableClusterLabels() { + return internalGetMutableClusterLabels().getMutableMap(); + } + /** + * + * + *
+     * Optional. Cluster labels to identify a cluster where the job will be
+     * submitted.
+     * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putClusterLabels(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + if (value == null) { + throw new java.lang.NullPointerException(); + } + internalGetMutableClusterLabels().getMutableMap().put(key, value); + return this; + } + /** + * + * + *
+     * Optional. Cluster labels to identify a cluster where the job will be
+     * submitted.
+     * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllClusterLabels(java.util.Map values) { + internalGetMutableClusterLabels().getMutableMap().putAll(values); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobPlacementOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobPlacementOrBuilder.java index d6fefeb0..696cdbe6 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobPlacementOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobPlacementOrBuilder.java @@ -74,4 +74,68 @@ public interface JobPlacementOrBuilder * @return The bytes for clusterUuid. */ com.google.protobuf.ByteString getClusterUuidBytes(); + + /** + * + * + *
+   * Optional. Cluster labels to identify a cluster where the job will be
+   * submitted.
+   * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getClusterLabelsCount(); + /** + * + * + *
+   * Optional. Cluster labels to identify a cluster where the job will be
+   * submitted.
+   * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsClusterLabels(java.lang.String key); + /** Use {@link #getClusterLabelsMap()} instead. */ + @java.lang.Deprecated + java.util.Map getClusterLabels(); + /** + * + * + *
+   * Optional. Cluster labels to identify a cluster where the job will be
+   * submitted.
+   * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getClusterLabelsMap(); + /** + * + * + *
+   * Optional. Cluster labels to identify a cluster where the job will be
+   * submitted.
+   * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getClusterLabelsOrDefault(java.lang.String key, java.lang.String defaultValue); + /** + * + * + *
+   * Optional. Cluster labels to identify a cluster where the job will be
+   * submitted.
+   * 
+ * + * map<string, string> cluster_labels = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getClusterLabelsOrThrow(java.lang.String key); } diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReference.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReference.java index 3aa591a6..046ede89 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReference.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReference.java @@ -125,8 +125,8 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Optional. The ID of the Google Cloud Platform project that the job belongs to. If
-   * specified, must match the request project ID.
+   * Optional. The ID of the Google Cloud Platform project that the job belongs
+   * to. If specified, must match the request project ID.
    * 
* * string project_id = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -149,8 +149,8 @@ public java.lang.String getProjectId() { * * *
-   * Optional. The ID of the Google Cloud Platform project that the job belongs to. If
-   * specified, must match the request project ID.
+   * Optional. The ID of the Google Cloud Platform project that the job belongs
+   * to. If specified, must match the request project ID.
    * 
* * string project_id = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -562,8 +562,8 @@ public Builder mergeFrom( * * *
-     * Optional. The ID of the Google Cloud Platform project that the job belongs to. If
-     * specified, must match the request project ID.
+     * Optional. The ID of the Google Cloud Platform project that the job belongs
+     * to. If specified, must match the request project ID.
      * 
* * string project_id = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -585,8 +585,8 @@ public java.lang.String getProjectId() { * * *
-     * Optional. The ID of the Google Cloud Platform project that the job belongs to. If
-     * specified, must match the request project ID.
+     * Optional. The ID of the Google Cloud Platform project that the job belongs
+     * to. If specified, must match the request project ID.
      * 
* * string project_id = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -608,8 +608,8 @@ public com.google.protobuf.ByteString getProjectIdBytes() { * * *
-     * Optional. The ID of the Google Cloud Platform project that the job belongs to. If
-     * specified, must match the request project ID.
+     * Optional. The ID of the Google Cloud Platform project that the job belongs
+     * to. If specified, must match the request project ID.
      * 
* * string project_id = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -630,8 +630,8 @@ public Builder setProjectId(java.lang.String value) { * * *
-     * Optional. The ID of the Google Cloud Platform project that the job belongs to. If
-     * specified, must match the request project ID.
+     * Optional. The ID of the Google Cloud Platform project that the job belongs
+     * to. If specified, must match the request project ID.
      * 
* * string project_id = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -648,8 +648,8 @@ public Builder clearProjectId() { * * *
-     * Optional. The ID of the Google Cloud Platform project that the job belongs to. If
-     * specified, must match the request project ID.
+     * Optional. The ID of the Google Cloud Platform project that the job belongs
+     * to. If specified, must match the request project ID.
      * 
* * string project_id = 1 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReferenceOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReferenceOrBuilder.java index df102fa4..c22d0ffd 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReferenceOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReferenceOrBuilder.java @@ -27,8 +27,8 @@ public interface JobReferenceOrBuilder * * *
-   * Optional. The ID of the Google Cloud Platform project that the job belongs to. If
-   * specified, must match the request project ID.
+   * Optional. The ID of the Google Cloud Platform project that the job belongs
+   * to. If specified, must match the request project ID.
    * 
* * string project_id = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -40,8 +40,8 @@ public interface JobReferenceOrBuilder * * *
-   * Optional. The ID of the Google Cloud Platform project that the job belongs to. If
-   * specified, must match the request project ID.
+   * Optional. The ID of the Google Cloud Platform project that the job belongs
+   * to. If specified, must match the request project ID.
    * 
* * string project_id = 1 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobScheduling.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobScheduling.java index 74d7e7b8..63b20c9a 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobScheduling.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobScheduling.java @@ -73,6 +73,11 @@ private JobScheduling( maxFailuresPerHour_ = input.readInt32(); break; } + case 16: + { + maxFailuresTotal_ = input.readInt32(); + break; + } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { @@ -114,7 +119,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * *
    * Optional. Maximum number of times per hour a driver may be restarted as
-   * a result of driver terminating with non-zero code before job is
+   * a result of driver exiting with non-zero code before job is
    * reported failed.
    * A job may be reported as thrashing if driver exits with non-zero code
    * 4 times within 10 minute window.
@@ -130,6 +135,26 @@ public int getMaxFailuresPerHour() {
     return maxFailuresPerHour_;
   }
 
+  public static final int MAX_FAILURES_TOTAL_FIELD_NUMBER = 2;
+  private int maxFailuresTotal_;
+  /**
+   *
+   *
+   * 
+   * Optional. Maximum number of times in total a driver may be restarted as a
+   * result of driver exiting with non-zero code before job is reported failed.
+   * Maximum value is 240.
+   * 
+ * + * int32 max_failures_total = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The maxFailuresTotal. + */ + @java.lang.Override + public int getMaxFailuresTotal() { + return maxFailuresTotal_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -147,6 +172,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (maxFailuresPerHour_ != 0) { output.writeInt32(1, maxFailuresPerHour_); } + if (maxFailuresTotal_ != 0) { + output.writeInt32(2, maxFailuresTotal_); + } unknownFields.writeTo(output); } @@ -159,6 +187,9 @@ public int getSerializedSize() { if (maxFailuresPerHour_ != 0) { size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, maxFailuresPerHour_); } + if (maxFailuresTotal_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, maxFailuresTotal_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -176,6 +207,7 @@ public boolean equals(final java.lang.Object obj) { (com.google.cloud.dataproc.v1.JobScheduling) obj; if (getMaxFailuresPerHour() != other.getMaxFailuresPerHour()) return false; + if (getMaxFailuresTotal() != other.getMaxFailuresTotal()) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -189,6 +221,8 @@ public int hashCode() { hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + MAX_FAILURES_PER_HOUR_FIELD_NUMBER; hash = (53 * hash) + getMaxFailuresPerHour(); + hash = (37 * hash) + MAX_FAILURES_TOTAL_FIELD_NUMBER; + hash = (53 * hash) + getMaxFailuresTotal(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -336,6 +370,8 @@ public Builder clear() { super.clear(); maxFailuresPerHour_ = 0; + maxFailuresTotal_ = 0; + return this; } @@ -364,6 +400,7 @@ public com.google.cloud.dataproc.v1.JobScheduling buildPartial() { com.google.cloud.dataproc.v1.JobScheduling result = new com.google.cloud.dataproc.v1.JobScheduling(this); 
result.maxFailuresPerHour_ = maxFailuresPerHour_; + result.maxFailuresTotal_ = maxFailuresTotal_; onBuilt(); return result; } @@ -416,6 +453,9 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.JobScheduling other) { if (other.getMaxFailuresPerHour() != 0) { setMaxFailuresPerHour(other.getMaxFailuresPerHour()); } + if (other.getMaxFailuresTotal() != 0) { + setMaxFailuresTotal(other.getMaxFailuresTotal()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -451,7 +491,7 @@ public Builder mergeFrom( * *
      * Optional. Maximum number of times per hour a driver may be restarted as
-     * a result of driver terminating with non-zero code before job is
+     * a result of driver exiting with non-zero code before job is
      * reported failed.
      * A job may be reported as thrashing if driver exits with non-zero code
      * 4 times within 10 minute window.
@@ -471,7 +511,7 @@ public int getMaxFailuresPerHour() {
      *
      * 
      * Optional. Maximum number of times per hour a driver may be restarted as
-     * a result of driver terminating with non-zero code before job is
+     * a result of driver exiting with non-zero code before job is
      * reported failed.
      * A job may be reported as thrashing if driver exits with non-zero code
      * 4 times within 10 minute window.
@@ -494,7 +534,7 @@ public Builder setMaxFailuresPerHour(int value) {
      *
      * 
      * Optional. Maximum number of times per hour a driver may be restarted as
-     * a result of driver terminating with non-zero code before job is
+     * a result of driver exiting with non-zero code before job is
      * reported failed.
      * A job may be reported as thrashing if driver exits with non-zero code
      * 4 times within 10 minute window.
@@ -512,6 +552,64 @@ public Builder clearMaxFailuresPerHour() {
       return this;
     }
 
+    private int maxFailuresTotal_;
+    /**
+     *
+     *
+     * 
+     * Optional. Maximum number of times in total a driver may be restarted as a
+     * result of driver exiting with non-zero code before job is reported failed.
+     * Maximum value is 240.
+     * 
+ * + * int32 max_failures_total = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The maxFailuresTotal. + */ + @java.lang.Override + public int getMaxFailuresTotal() { + return maxFailuresTotal_; + } + /** + * + * + *
+     * Optional. Maximum number of times in total a driver may be restarted as a
+     * result of driver exiting with non-zero code before job is reported failed.
+     * Maximum value is 240.
+     * 
+ * + * int32 max_failures_total = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The maxFailuresTotal to set. + * @return This builder for chaining. + */ + public Builder setMaxFailuresTotal(int value) { + + maxFailuresTotal_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Maximum number of times in total a driver may be restarted as a
+     * result of driver exiting with non-zero code before job is reported failed.
+     * Maximum value is 240.
+     * 
+ * + * int32 max_failures_total = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearMaxFailuresTotal() { + + maxFailuresTotal_ = 0; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobSchedulingOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobSchedulingOrBuilder.java index 15665dc4..29b4b317 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobSchedulingOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobSchedulingOrBuilder.java @@ -28,7 +28,7 @@ public interface JobSchedulingOrBuilder * *
    * Optional. Maximum number of times per hour a driver may be restarted as
-   * a result of driver terminating with non-zero code before job is
+   * a result of driver exiting with non-zero code before job is
    * reported failed.
    * A job may be reported as thrashing if driver exits with non-zero code
    * 4 times within 10 minute window.
@@ -40,4 +40,19 @@ public interface JobSchedulingOrBuilder
    * @return The maxFailuresPerHour.
    */
   int getMaxFailuresPerHour();
+
+  /**
+   *
+   *
+   * 
+   * Optional. Maximum number of times in total a driver may be restarted as a
+   * result of driver exiting with non-zero code before job is reported failed.
+   * Maximum value is 240.
+   * 
+ * + * int32 max_failures_total = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The maxFailuresTotal. + */ + int getMaxFailuresTotal(); } diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobsProto.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobsProto.java index 982ab839..6ce22a02 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobsProto.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobsProto.java @@ -119,6 +119,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_dataproc_v1_JobPlacement_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_dataproc_v1_JobPlacement_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_JobPlacement_ClusterLabelsEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_JobPlacement_ClusterLabelsEntry_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_cloud_dataproc_v1_JobStatus_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -281,128 +285,132 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "pertiesEntryB\003\340A\001\022D\n\016logging_config\030\007 \001(" + "\0132\'.google.cloud.dataproc.v1.LoggingConf" + "igB\003\340A\001\0321\n\017PropertiesEntry\022\013\n\003key\030\001 \001(\t\022" - + "\r\n\005value\030\002 \001(\t:\0028\001B\t\n\007queries\"D\n\014JobPlac" - + "ement\022\031\n\014cluster_name\030\001 \001(\tB\003\340A\002\022\031\n\014clus" - + "ter_uuid\030\002 \001(\tB\003\340A\003\"\331\003\n\tJobStatus\022=\n\005sta" - + "te\030\001 \001(\0162).google.cloud.dataproc.v1.JobS" - + 
"tatus.StateB\003\340A\003\022\027\n\007details\030\002 \001(\tB\006\340A\003\340A" - + "\001\0229\n\020state_start_time\030\006 \001(\0132\032.google.pro" - + "tobuf.TimestampB\003\340A\003\022C\n\010substate\030\007 \001(\0162," - + ".google.cloud.dataproc.v1.JobStatus.Subs" - + "tateB\003\340A\003\"\251\001\n\005State\022\025\n\021STATE_UNSPECIFIED" - + "\020\000\022\013\n\007PENDING\020\001\022\016\n\nSETUP_DONE\020\010\022\013\n\007RUNNI" - + "NG\020\002\022\022\n\016CANCEL_PENDING\020\003\022\022\n\016CANCEL_START" - + "ED\020\007\022\r\n\tCANCELLED\020\004\022\010\n\004DONE\020\005\022\t\n\005ERROR\020\006" - + "\022\023\n\017ATTEMPT_FAILURE\020\t\"H\n\010Substate\022\017\n\013UNS" - + "PECIFIED\020\000\022\r\n\tSUBMITTED\020\001\022\n\n\006QUEUED\020\002\022\020\n" - + "\014STALE_STATUS\020\003\"<\n\014JobReference\022\027\n\nproje" - + "ct_id\030\001 \001(\tB\003\340A\001\022\023\n\006job_id\030\002 \001(\tB\003\340A\001\"\245\002" - + "\n\017YarnApplication\022\021\n\004name\030\001 \001(\tB\003\340A\002\022C\n\005" - + "state\030\002 \001(\0162/.google.cloud.dataproc.v1.Y" - + "arnApplication.StateB\003\340A\002\022\025\n\010progress\030\003 " - + "\001(\002B\003\340A\002\022\031\n\014tracking_url\030\004 \001(\tB\003\340A\001\"\207\001\n\005" - + "State\022\025\n\021STATE_UNSPECIFIED\020\000\022\007\n\003NEW\020\001\022\016\n" - + "\nNEW_SAVING\020\002\022\r\n\tSUBMITTED\020\003\022\014\n\010ACCEPTED" - + "\020\004\022\013\n\007RUNNING\020\005\022\014\n\010FINISHED\020\006\022\n\n\006FAILED\020" - + "\007\022\n\n\006KILLED\020\010\"\377\010\n\003Job\022>\n\treference\030\001 \001(\013" - + "2&.google.cloud.dataproc.v1.JobReference" - + "B\003\340A\001\022>\n\tplacement\030\002 \001(\0132&.google.cloud." 
- + "dataproc.v1.JobPlacementB\003\340A\002\022>\n\nhadoop_" - + "job\030\003 \001(\0132#.google.cloud.dataproc.v1.Had" - + "oopJobB\003\340A\001H\000\022<\n\tspark_job\030\004 \001(\0132\".googl" - + "e.cloud.dataproc.v1.SparkJobB\003\340A\001H\000\022@\n\013p" - + "yspark_job\030\005 \001(\0132$.google.cloud.dataproc" - + ".v1.PySparkJobB\003\340A\001H\000\022:\n\010hive_job\030\006 \001(\0132" - + "!.google.cloud.dataproc.v1.HiveJobB\003\340A\001H" - + "\000\0228\n\007pig_job\030\007 \001(\0132 .google.cloud.datapr" - + "oc.v1.PigJobB\003\340A\001H\000\022?\n\013spark_r_job\030\025 \001(\013" - + "2#.google.cloud.dataproc.v1.SparkRJobB\003\340" - + "A\001H\000\022C\n\rspark_sql_job\030\014 \001(\0132%.google.clo" - + "ud.dataproc.v1.SparkSqlJobB\003\340A\001H\000\022>\n\npre" - + "sto_job\030\027 \001(\0132#.google.cloud.dataproc.v1" - + ".PrestoJobB\003\340A\001H\000\0228\n\006status\030\010 \001(\0132#.goog" - + "le.cloud.dataproc.v1.JobStatusB\003\340A\003\022@\n\016s" - + "tatus_history\030\r \003(\0132#.google.cloud.datap" - + "roc.v1.JobStatusB\003\340A\003\022I\n\021yarn_applicatio" - + "ns\030\t \003(\0132).google.cloud.dataproc.v1.Yarn" - + "ApplicationB\003\340A\003\022\'\n\032driver_output_resour" - + "ce_uri\030\021 \001(\tB\003\340A\003\022%\n\030driver_control_file" - + "s_uri\030\017 \001(\tB\003\340A\003\022>\n\006labels\030\022 \003(\0132).googl" - + "e.cloud.dataproc.v1.Job.LabelsEntryB\003\340A\001" - + "\022@\n\nscheduling\030\024 \001(\0132\'.google.cloud.data" - + "proc.v1.JobSchedulingB\003\340A\001\022\025\n\010job_uuid\030\026" - + " \001(\tB\003\340A\003\022\021\n\004done\030\030 \001(\010B\003\340A\003\032-\n\013LabelsEn" - + "try\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\n\n\010t" - + "ype_job\"3\n\rJobScheduling\022\"\n\025max_failures" - + "_per_hour\030\001 \001(\005B\003\340A\001\"\212\001\n\020SubmitJobReques" - + "t\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 
\001" - + "(\tB\003\340A\002\022/\n\003job\030\002 \001(\0132\035.google.cloud.data" - + "proc.v1.JobB\003\340A\002\022\027\n\nrequest_id\030\004 \001(\tB\003\340A" - + "\001\"\256\001\n\013JobMetadata\022\023\n\006job_id\030\001 \001(\tB\003\340A\003\0228" - + "\n\006status\030\002 \001(\0132#.google.cloud.dataproc.v" - + "1.JobStatusB\003\340A\003\022\033\n\016operation_type\030\003 \001(\t" - + "B\003\340A\003\0223\n\nstart_time\030\004 \001(\0132\032.google.proto" - + "buf.TimestampB\003\340A\003\"R\n\rGetJobRequest\022\027\n\np" - + "roject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A" - + "\002\022\023\n\006job_id\030\002 \001(\tB\003\340A\002\"\263\002\n\017ListJobsReque" - + "st\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\006 " - + "\001(\tB\003\340A\002\022\026\n\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\npage" - + "_token\030\003 \001(\tB\003\340A\001\022\031\n\014cluster_name\030\004 \001(\tB" - + "\003\340A\001\022Y\n\021job_state_matcher\030\005 \001(\01629.google" - + ".cloud.dataproc.v1.ListJobsRequest.JobSt" - + "ateMatcherB\003\340A\001\022\023\n\006filter\030\007 \001(\tB\003\340A\001\"6\n\017" - + "JobStateMatcher\022\007\n\003ALL\020\000\022\n\n\006ACTIVE\020\001\022\016\n\n" - + "NON_ACTIVE\020\002\"\274\001\n\020UpdateJobRequest\022\027\n\npro" - + "ject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\002 \001(\tB\003\340A\002\022" - + "\023\n\006job_id\030\003 \001(\tB\003\340A\002\022/\n\003job\030\004 \001(\0132\035.goog" - + "le.cloud.dataproc.v1.JobB\003\340A\002\0224\n\013update_" - + "mask\030\005 \001(\0132\032.google.protobuf.FieldMaskB\003" - + "\340A\002\"b\n\020ListJobsResponse\0220\n\004jobs\030\001 \003(\0132\035." 
- + "google.cloud.dataproc.v1.JobB\003\340A\003\022\034\n\017nex" - + "t_page_token\030\002 \001(\tB\003\340A\001\"U\n\020CancelJobRequ" - + "est\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003" - + " \001(\tB\003\340A\002\022\023\n\006job_id\030\002 \001(\tB\003\340A\002\"U\n\020Delete" - + "JobRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006r" - + "egion\030\003 \001(\tB\003\340A\002\022\023\n\006job_id\030\002 \001(\tB\003\340A\0022\374\n" - + "\n\rJobController\022\261\001\n\tSubmitJob\022*.google.c" - + "loud.dataproc.v1.SubmitJobRequest\032\035.goog" - + "le.cloud.dataproc.v1.Job\"Y\202\323\344\223\002;\"6/v1/pr" - + "ojects/{project_id}/regions/{region}/job" - + "s:submit:\001*\332A\025project_id,region,job\022\336\001\n\024" - + "SubmitJobAsOperation\022*.google.cloud.data" - + "proc.v1.SubmitJobRequest\032\035.google.longru" - + "nning.Operation\"{\202\323\344\223\002F\"A/v1/projects/{p" - + "roject_id}/regions/{region}/jobs:submitA" - + "sOperation:\001*\332A\027project_id, region, job\312" - + "A\022\n\003Job\022\013JobMetadata\022\255\001\n\006GetJob\022\'.google" - + ".cloud.dataproc.v1.GetJobRequest\032\035.googl" - + "e.cloud.dataproc.v1.Job\"[\202\323\344\223\002:\0228/v1/pro" - + "jects/{project_id}/regions/{region}/jobs" - + "/{job_id}\332A\030project_id,region,job_id\022\311\001\n" - + "\010ListJobs\022).google.cloud.dataproc.v1.Lis" - + "tJobsRequest\032*.google.cloud.dataproc.v1." 
- + "ListJobsResponse\"f\202\323\344\223\0021\022//v1/projects/{" - + "project_id}/regions/{region}/jobs\332A\021proj" - + "ect_id,region\332A\030project_id,region,filter" - + "\022\235\001\n\tUpdateJob\022*.google.cloud.dataproc.v" - + "1.UpdateJobRequest\032\035.google.cloud.datapr" - + "oc.v1.Job\"E\202\323\344\223\002?28/v1/projects/{project" - + "_id}/regions/{region}/jobs/{job_id}:\003job" - + "\022\275\001\n\tCancelJob\022*.google.cloud.dataproc.v" - + "1.CancelJobRequest\032\035.google.cloud.datapr" - + "oc.v1.Job\"e\202\323\344\223\002D\"?/v1/projects/{project" - + "_id}/regions/{region}/jobs/{job_id}:canc" - + "el:\001*\332A\030project_id,region,job_id\022\254\001\n\tDel" - + "eteJob\022*.google.cloud.dataproc.v1.Delete" - + "JobRequest\032\026.google.protobuf.Empty\"[\202\323\344\223" - + "\002:*8/v1/projects/{project_id}/regions/{r" - + "egion}/jobs/{job_id}\332A\030project_id,region" - + ",job_id\032K\312A\027dataproc.googleapis.com\322A.ht" - + "tps://www.googleapis.com/auth/cloud-plat" - + "formBm\n\034com.google.cloud.dataproc.v1B\tJo" - + "bsProtoP\001Z@google.golang.org/genproto/go" - + "ogleapis/cloud/dataproc/v1;dataprocb\006pro" - + "to3" + + "\r\n\005value\030\002 \001(\t:\0028\001B\t\n\007queries\"\322\001\n\014JobPla" + + "cement\022\031\n\014cluster_name\030\001 \001(\tB\003\340A\002\022\031\n\014clu" + + "ster_uuid\030\002 \001(\tB\003\340A\003\022V\n\016cluster_labels\030\003" + + " \003(\01329.google.cloud.dataproc.v1.JobPlace" + + "ment.ClusterLabelsEntryB\003\340A\001\0324\n\022ClusterL" + + "abelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\002" + + "8\001\"\331\003\n\tJobStatus\022=\n\005state\030\001 \001(\0162).google" + + ".cloud.dataproc.v1.JobStatus.StateB\003\340A\003\022" + + "\027\n\007details\030\002 \001(\tB\006\340A\003\340A\001\0229\n\020state_start_" + + "time\030\006 \001(\0132\032.google.protobuf.TimestampB\003" + + "\340A\003\022C\n\010substate\030\007 \001(\0162,.google.cloud.dat" + + 
"aproc.v1.JobStatus.SubstateB\003\340A\003\"\251\001\n\005Sta" + + "te\022\025\n\021STATE_UNSPECIFIED\020\000\022\013\n\007PENDING\020\001\022\016" + + "\n\nSETUP_DONE\020\010\022\013\n\007RUNNING\020\002\022\022\n\016CANCEL_PE" + + "NDING\020\003\022\022\n\016CANCEL_STARTED\020\007\022\r\n\tCANCELLED" + + "\020\004\022\010\n\004DONE\020\005\022\t\n\005ERROR\020\006\022\023\n\017ATTEMPT_FAILU" + + "RE\020\t\"H\n\010Substate\022\017\n\013UNSPECIFIED\020\000\022\r\n\tSUB" + + "MITTED\020\001\022\n\n\006QUEUED\020\002\022\020\n\014STALE_STATUS\020\003\"<" + + "\n\014JobReference\022\027\n\nproject_id\030\001 \001(\tB\003\340A\001\022" + + "\023\n\006job_id\030\002 \001(\tB\003\340A\001\"\245\002\n\017YarnApplication" + + "\022\021\n\004name\030\001 \001(\tB\003\340A\002\022C\n\005state\030\002 \001(\0162/.goo" + + "gle.cloud.dataproc.v1.YarnApplication.St" + + "ateB\003\340A\002\022\025\n\010progress\030\003 \001(\002B\003\340A\002\022\031\n\014track" + + "ing_url\030\004 \001(\tB\003\340A\001\"\207\001\n\005State\022\025\n\021STATE_UN" + + "SPECIFIED\020\000\022\007\n\003NEW\020\001\022\016\n\nNEW_SAVING\020\002\022\r\n\t" + + "SUBMITTED\020\003\022\014\n\010ACCEPTED\020\004\022\013\n\007RUNNING\020\005\022\014" + + "\n\010FINISHED\020\006\022\n\n\006FAILED\020\007\022\n\n\006KILLED\020\010\"\377\010\n" + + "\003Job\022>\n\treference\030\001 \001(\0132&.google.cloud.d" + + "ataproc.v1.JobReferenceB\003\340A\001\022>\n\tplacemen" + + "t\030\002 \001(\0132&.google.cloud.dataproc.v1.JobPl" + + "acementB\003\340A\002\022>\n\nhadoop_job\030\003 \001(\0132#.googl" + + "e.cloud.dataproc.v1.HadoopJobB\003\340A\001H\000\022<\n\t" + + "spark_job\030\004 \001(\0132\".google.cloud.dataproc." 
+ + "v1.SparkJobB\003\340A\001H\000\022@\n\013pyspark_job\030\005 \001(\0132" + + "$.google.cloud.dataproc.v1.PySparkJobB\003\340" + + "A\001H\000\022:\n\010hive_job\030\006 \001(\0132!.google.cloud.da" + + "taproc.v1.HiveJobB\003\340A\001H\000\0228\n\007pig_job\030\007 \001(" + + "\0132 .google.cloud.dataproc.v1.PigJobB\003\340A\001" + + "H\000\022?\n\013spark_r_job\030\025 \001(\0132#.google.cloud.d" + + "ataproc.v1.SparkRJobB\003\340A\001H\000\022C\n\rspark_sql" + + "_job\030\014 \001(\0132%.google.cloud.dataproc.v1.Sp" + + "arkSqlJobB\003\340A\001H\000\022>\n\npresto_job\030\027 \001(\0132#.g" + + "oogle.cloud.dataproc.v1.PrestoJobB\003\340A\001H\000" + + "\0228\n\006status\030\010 \001(\0132#.google.cloud.dataproc" + + ".v1.JobStatusB\003\340A\003\022@\n\016status_history\030\r \003" + + "(\0132#.google.cloud.dataproc.v1.JobStatusB" + + "\003\340A\003\022I\n\021yarn_applications\030\t \003(\0132).google" + + ".cloud.dataproc.v1.YarnApplicationB\003\340A\003\022" + + "\'\n\032driver_output_resource_uri\030\021 \001(\tB\003\340A\003" + + "\022%\n\030driver_control_files_uri\030\017 \001(\tB\003\340A\003\022" + + ">\n\006labels\030\022 \003(\0132).google.cloud.dataproc." 
+ + "v1.Job.LabelsEntryB\003\340A\001\022@\n\nscheduling\030\024 " + + "\001(\0132\'.google.cloud.dataproc.v1.JobSchedu" + + "lingB\003\340A\001\022\025\n\010job_uuid\030\026 \001(\tB\003\340A\003\022\021\n\004done" + + "\030\030 \001(\010B\003\340A\003\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001(\t\022" + + "\r\n\005value\030\002 \001(\t:\0028\001B\n\n\010type_job\"T\n\rJobSch" + + "eduling\022\"\n\025max_failures_per_hour\030\001 \001(\005B\003" + + "\340A\001\022\037\n\022max_failures_total\030\002 \001(\005B\003\340A\001\"\212\001\n" + + "\020SubmitJobRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340" + + "A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022/\n\003job\030\002 \001(\0132\035.g" + + "oogle.cloud.dataproc.v1.JobB\003\340A\002\022\027\n\nrequ" + + "est_id\030\004 \001(\tB\003\340A\001\"\256\001\n\013JobMetadata\022\023\n\006job" + + "_id\030\001 \001(\tB\003\340A\003\0228\n\006status\030\002 \001(\0132#.google." + + "cloud.dataproc.v1.JobStatusB\003\340A\003\022\033\n\016oper" + + "ation_type\030\003 \001(\tB\003\340A\003\0223\n\nstart_time\030\004 \001(" + + "\0132\032.google.protobuf.TimestampB\003\340A\003\"R\n\rGe" + + "tJobRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006" + + "region\030\003 \001(\tB\003\340A\002\022\023\n\006job_id\030\002 \001(\tB\003\340A\002\"\263" + + "\002\n\017ListJobsRequest\022\027\n\nproject_id\030\001 \001(\tB\003" + + "\340A\002\022\023\n\006region\030\006 \001(\tB\003\340A\002\022\026\n\tpage_size\030\002 " + + "\001(\005B\003\340A\001\022\027\n\npage_token\030\003 \001(\tB\003\340A\001\022\031\n\014clu" + + "ster_name\030\004 \001(\tB\003\340A\001\022Y\n\021job_state_matche" + + "r\030\005 \001(\01629.google.cloud.dataproc.v1.ListJ" + + "obsRequest.JobStateMatcherB\003\340A\001\022\023\n\006filte" + + "r\030\007 \001(\tB\003\340A\001\"6\n\017JobStateMatcher\022\007\n\003ALL\020\000" + + "\022\n\n\006ACTIVE\020\001\022\016\n\nNON_ACTIVE\020\002\"\274\001\n\020UpdateJ" + 
+ "obRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006re" + + "gion\030\002 \001(\tB\003\340A\002\022\023\n\006job_id\030\003 \001(\tB\003\340A\002\022/\n\003" + + "job\030\004 \001(\0132\035.google.cloud.dataproc.v1.Job" + + "B\003\340A\002\0224\n\013update_mask\030\005 \001(\0132\032.google.prot" + + "obuf.FieldMaskB\003\340A\002\"b\n\020ListJobsResponse\022" + + "0\n\004jobs\030\001 \003(\0132\035.google.cloud.dataproc.v1" + + ".JobB\003\340A\003\022\034\n\017next_page_token\030\002 \001(\tB\003\340A\001\"" + + "U\n\020CancelJobRequest\022\027\n\nproject_id\030\001 \001(\tB" + + "\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022\023\n\006job_id\030\002 \001(" + + "\tB\003\340A\002\"U\n\020DeleteJobRequest\022\027\n\nproject_id" + + "\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022\023\n\006job_" + + "id\030\002 \001(\tB\003\340A\0022\374\n\n\rJobController\022\261\001\n\tSubm" + + "itJob\022*.google.cloud.dataproc.v1.SubmitJ" + + "obRequest\032\035.google.cloud.dataproc.v1.Job" + + "\"Y\202\323\344\223\002;\"6/v1/projects/{project_id}/regi" + + "ons/{region}/jobs:submit:\001*\332A\025project_id" + + ",region,job\022\336\001\n\024SubmitJobAsOperation\022*.g" + + "oogle.cloud.dataproc.v1.SubmitJobRequest" + + "\032\035.google.longrunning.Operation\"{\202\323\344\223\002F\"" + + "A/v1/projects/{project_id}/regions/{regi" + + "on}/jobs:submitAsOperation:\001*\332A\027project_" + + "id, region, job\312A\022\n\003Job\022\013JobMetadata\022\255\001\n" + + "\006GetJob\022\'.google.cloud.dataproc.v1.GetJo" + + "bRequest\032\035.google.cloud.dataproc.v1.Job\"" + + "[\202\323\344\223\002:\0228/v1/projects/{project_id}/regio" + + "ns/{region}/jobs/{job_id}\332A\030project_id,r" + + "egion,job_id\022\311\001\n\010ListJobs\022).google.cloud" + + ".dataproc.v1.ListJobsRequest\032*.google.cl" + + "oud.dataproc.v1.ListJobsResponse\"f\202\323\344\223\0021" + + 
"\022//v1/projects/{project_id}/regions/{reg" + + "ion}/jobs\332A\021project_id,region\332A\030project_" + + "id,region,filter\022\235\001\n\tUpdateJob\022*.google." + + "cloud.dataproc.v1.UpdateJobRequest\032\035.goo" + + "gle.cloud.dataproc.v1.Job\"E\202\323\344\223\002?28/v1/p" + + "rojects/{project_id}/regions/{region}/jo" + + "bs/{job_id}:\003job\022\275\001\n\tCancelJob\022*.google." + + "cloud.dataproc.v1.CancelJobRequest\032\035.goo" + + "gle.cloud.dataproc.v1.Job\"e\202\323\344\223\002D\"?/v1/p" + + "rojects/{project_id}/regions/{region}/jo" + + "bs/{job_id}:cancel:\001*\332A\030project_id,regio" + + "n,job_id\022\254\001\n\tDeleteJob\022*.google.cloud.da" + + "taproc.v1.DeleteJobRequest\032\026.google.prot" + + "obuf.Empty\"[\202\323\344\223\002:*8/v1/projects/{projec" + + "t_id}/regions/{region}/jobs/{job_id}\332A\030p" + + "roject_id,region,job_id\032K\312A\027dataproc.goo" + + "gleapis.com\322A.https://www.googleapis.com" + + "/auth/cloud-platformBm\n\034com.google.cloud" + + ".dataproc.v1B\tJobsProtoP\001Z@google.golang" + + ".org/genproto/googleapis/cloud/dataproc/" + + "v1;dataprocb\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -647,7 +655,15 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_JobPlacement_descriptor, new java.lang.String[] { - "ClusterName", "ClusterUuid", + "ClusterName", "ClusterUuid", "ClusterLabels", + }); + internal_static_google_cloud_dataproc_v1_JobPlacement_ClusterLabelsEntry_descriptor = + internal_static_google_cloud_dataproc_v1_JobPlacement_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1_JobPlacement_ClusterLabelsEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_JobPlacement_ClusterLabelsEntry_descriptor, + new 
java.lang.String[] { + "Key", "Value", }); internal_static_google_cloud_dataproc_v1_JobStatus_descriptor = getDescriptor().getMessageTypes().get(11); @@ -714,7 +730,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_JobScheduling_descriptor, new java.lang.String[] { - "MaxFailuresPerHour", + "MaxFailuresPerHour", "MaxFailuresTotal", }); internal_static_google_cloud_dataproc_v1_SubmitJobRequest_descriptor = getDescriptor().getMessageTypes().get(16); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KerberosConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KerberosConfig.java index fb3d78bd..b7aaf98c 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KerberosConfig.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KerberosConfig.java @@ -242,11 +242,11 @@ public boolean getEnableKerberos() { * * *
-   * Required. The Cloud Storage URI of a KMS encrypted file containing the root
+   * Optional. The Cloud Storage URI of a KMS encrypted file containing the root
    * principal password.
    * 
* - * string root_principal_password_uri = 2 [(.google.api.field_behavior) = REQUIRED]; + * string root_principal_password_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; * * @return The rootPrincipalPasswordUri. */ @@ -266,11 +266,11 @@ public java.lang.String getRootPrincipalPasswordUri() { * * *
-   * Required. The Cloud Storage URI of a KMS encrypted file containing the root
+   * Optional. The Cloud Storage URI of a KMS encrypted file containing the root
    * principal password.
    * 
* - * string root_principal_password_uri = 2 [(.google.api.field_behavior) = REQUIRED]; + * string root_principal_password_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; * * @return The bytes for rootPrincipalPasswordUri. */ @@ -293,11 +293,11 @@ public com.google.protobuf.ByteString getRootPrincipalPasswordUriBytes() { * * *
-   * Required. The uri of the KMS key used to encrypt various sensitive
+   * Optional. The uri of the KMS key used to encrypt various sensitive
    * files.
    * 
* - * string kms_key_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * string kms_key_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; * * @return The kmsKeyUri. */ @@ -317,11 +317,11 @@ public java.lang.String getKmsKeyUri() { * * *
-   * Required. The uri of the KMS key used to encrypt various sensitive
+   * Optional. The uri of the KMS key used to encrypt various sensitive
    * files.
    * 
* - * string kms_key_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * string kms_key_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; * * @return The bytes for kmsKeyUri. */ @@ -1542,11 +1542,11 @@ public Builder clearEnableKerberos() { * * *
-     * Required. The Cloud Storage URI of a KMS encrypted file containing the root
+     * Optional. The Cloud Storage URI of a KMS encrypted file containing the root
      * principal password.
      * 
* - * string root_principal_password_uri = 2 [(.google.api.field_behavior) = REQUIRED]; + * string root_principal_password_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; * * * @return The rootPrincipalPasswordUri. @@ -1566,11 +1566,11 @@ public java.lang.String getRootPrincipalPasswordUri() { * * *
-     * Required. The Cloud Storage URI of a KMS encrypted file containing the root
+     * Optional. The Cloud Storage URI of a KMS encrypted file containing the root
      * principal password.
      * 
* - * string root_principal_password_uri = 2 [(.google.api.field_behavior) = REQUIRED]; + * string root_principal_password_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; * * * @return The bytes for rootPrincipalPasswordUri. @@ -1590,11 +1590,11 @@ public com.google.protobuf.ByteString getRootPrincipalPasswordUriBytes() { * * *
-     * Required. The Cloud Storage URI of a KMS encrypted file containing the root
+     * Optional. The Cloud Storage URI of a KMS encrypted file containing the root
      * principal password.
      * 
* - * string root_principal_password_uri = 2 [(.google.api.field_behavior) = REQUIRED]; + * string root_principal_password_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; * * * @param value The rootPrincipalPasswordUri to set. @@ -1613,11 +1613,11 @@ public Builder setRootPrincipalPasswordUri(java.lang.String value) { * * *
-     * Required. The Cloud Storage URI of a KMS encrypted file containing the root
+     * Optional. The Cloud Storage URI of a KMS encrypted file containing the root
      * principal password.
      * 
* - * string root_principal_password_uri = 2 [(.google.api.field_behavior) = REQUIRED]; + * string root_principal_password_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; * * * @return This builder for chaining. @@ -1632,11 +1632,11 @@ public Builder clearRootPrincipalPasswordUri() { * * *
-     * Required. The Cloud Storage URI of a KMS encrypted file containing the root
+     * Optional. The Cloud Storage URI of a KMS encrypted file containing the root
      * principal password.
      * 
* - * string root_principal_password_uri = 2 [(.google.api.field_behavior) = REQUIRED]; + * string root_principal_password_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; * * * @param value The bytes for rootPrincipalPasswordUri to set. @@ -1658,11 +1658,11 @@ public Builder setRootPrincipalPasswordUriBytes(com.google.protobuf.ByteString v * * *
-     * Required. The uri of the KMS key used to encrypt various sensitive
+     * Optional. The uri of the KMS key used to encrypt various sensitive
      * files.
      * 
* - * string kms_key_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * string kms_key_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; * * @return The kmsKeyUri. */ @@ -1681,11 +1681,11 @@ public java.lang.String getKmsKeyUri() { * * *
-     * Required. The uri of the KMS key used to encrypt various sensitive
+     * Optional. The uri of the KMS key used to encrypt various sensitive
      * files.
      * 
* - * string kms_key_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * string kms_key_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; * * @return The bytes for kmsKeyUri. */ @@ -1704,11 +1704,11 @@ public com.google.protobuf.ByteString getKmsKeyUriBytes() { * * *
-     * Required. The uri of the KMS key used to encrypt various sensitive
+     * Optional. The uri of the KMS key used to encrypt various sensitive
      * files.
      * 
* - * string kms_key_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * string kms_key_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; * * @param value The kmsKeyUri to set. * @return This builder for chaining. @@ -1726,11 +1726,11 @@ public Builder setKmsKeyUri(java.lang.String value) { * * *
-     * Required. The uri of the KMS key used to encrypt various sensitive
+     * Optional. The uri of the KMS key used to encrypt various sensitive
      * files.
      * 
* - * string kms_key_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * string kms_key_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; * * @return This builder for chaining. */ @@ -1744,11 +1744,11 @@ public Builder clearKmsKeyUri() { * * *
-     * Required. The uri of the KMS key used to encrypt various sensitive
+     * Optional. The uri of the KMS key used to encrypt various sensitive
      * files.
      * 
* - * string kms_key_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * string kms_key_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; * * @param value The bytes for kmsKeyUri to set. * @return This builder for chaining. diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KerberosConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KerberosConfigOrBuilder.java index 426c5c3a..0f7dc472 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KerberosConfigOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KerberosConfigOrBuilder.java @@ -41,11 +41,11 @@ public interface KerberosConfigOrBuilder * * *
-   * Required. The Cloud Storage URI of a KMS encrypted file containing the root
+   * Optional. The Cloud Storage URI of a KMS encrypted file containing the root
    * principal password.
    * 
* - * string root_principal_password_uri = 2 [(.google.api.field_behavior) = REQUIRED]; + * string root_principal_password_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; * * @return The rootPrincipalPasswordUri. */ @@ -54,11 +54,11 @@ public interface KerberosConfigOrBuilder * * *
-   * Required. The Cloud Storage URI of a KMS encrypted file containing the root
+   * Optional. The Cloud Storage URI of a KMS encrypted file containing the root
    * principal password.
    * 
* - * string root_principal_password_uri = 2 [(.google.api.field_behavior) = REQUIRED]; + * string root_principal_password_uri = 2 [(.google.api.field_behavior) = OPTIONAL]; * * @return The bytes for rootPrincipalPasswordUri. */ @@ -68,11 +68,11 @@ public interface KerberosConfigOrBuilder * * *
-   * Required. The uri of the KMS key used to encrypt various sensitive
+   * Optional. The uri of the KMS key used to encrypt various sensitive
    * files.
    * 
* - * string kms_key_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * string kms_key_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; * * @return The kmsKeyUri. */ @@ -81,11 +81,11 @@ public interface KerberosConfigOrBuilder * * *
-   * Required. The uri of the KMS key used to encrypt various sensitive
+   * Optional. The uri of the KMS key used to encrypt various sensitive
    * files.
    * 
* - * string kms_key_uri = 3 [(.google.api.field_behavior) = REQUIRED]; + * string kms_key_uri = 3 [(.google.api.field_behavior) = OPTIONAL]; * * @return The bytes for kmsKeyUri. */ diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfig.java index 09df34bf..92751d87 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfig.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfig.java @@ -215,9 +215,9 @@ public TtlCase getTtlCase() { *
    * Optional. The duration to keep the cluster alive while idling (when no jobs
    * are running). Passing this threshold will cause the cluster to be
-   * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+   * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
    * representation of
-   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
    * 
* * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -235,9 +235,9 @@ public boolean hasIdleDeleteTtl() { *
    * Optional. The duration to keep the cluster alive while idling (when no jobs
    * are running). Passing this threshold will cause the cluster to be
-   * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+   * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
    * representation of
-   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
    * 
* * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -257,9 +257,9 @@ public com.google.protobuf.Duration getIdleDeleteTtl() { *
    * Optional. The duration to keep the cluster alive while idling (when no jobs
    * are running). Passing this threshold will cause the cluster to be
-   * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+   * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
    * representation of
-   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
    * 
* * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -910,9 +910,9 @@ public Builder clearTtl() { *
      * Optional. The duration to keep the cluster alive while idling (when no jobs
      * are running). Passing this threshold will cause the cluster to be
-     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
      * representation of
-     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -930,9 +930,9 @@ public boolean hasIdleDeleteTtl() { *
      * Optional. The duration to keep the cluster alive while idling (when no jobs
      * are running). Passing this threshold will cause the cluster to be
-     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
      * representation of
-     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -956,9 +956,9 @@ public com.google.protobuf.Duration getIdleDeleteTtl() { *
      * Optional. The duration to keep the cluster alive while idling (when no jobs
      * are running). Passing this threshold will cause the cluster to be
-     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
      * representation of
-     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -984,9 +984,9 @@ public Builder setIdleDeleteTtl(com.google.protobuf.Duration value) { *
      * Optional. The duration to keep the cluster alive while idling (when no jobs
      * are running). Passing this threshold will cause the cluster to be
-     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
      * representation of
-     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -1009,9 +1009,9 @@ public Builder setIdleDeleteTtl(com.google.protobuf.Duration.Builder builderForV *
      * Optional. The duration to keep the cluster alive while idling (when no jobs
      * are running). Passing this threshold will cause the cluster to be
-     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
      * representation of
-     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -1041,9 +1041,9 @@ public Builder mergeIdleDeleteTtl(com.google.protobuf.Duration value) { *
      * Optional. The duration to keep the cluster alive while idling (when no jobs
      * are running). Passing this threshold will cause the cluster to be
-     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
      * representation of
-     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -1067,9 +1067,9 @@ public Builder clearIdleDeleteTtl() { *
      * Optional. The duration to keep the cluster alive while idling (when no jobs
      * are running). Passing this threshold will cause the cluster to be
-     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
      * representation of
-     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -1087,9 +1087,9 @@ public com.google.protobuf.Duration.Builder getIdleDeleteTtlBuilder() { *
      * Optional. The duration to keep the cluster alive while idling (when no jobs
      * are running). Passing this threshold will cause the cluster to be
-     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
      * representation of
-     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -1111,9 +1111,9 @@ public com.google.protobuf.DurationOrBuilder getIdleDeleteTtlOrBuilder() { *
      * Optional. The duration to keep the cluster alive while idling (when no jobs
      * are running). Passing this threshold will cause the cluster to be
-     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
      * representation of
-     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfigOrBuilder.java index 624e8e12..3f94c68e 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfigOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfigOrBuilder.java @@ -29,9 +29,9 @@ public interface LifecycleConfigOrBuilder *
    * Optional. The duration to keep the cluster alive while idling (when no jobs
    * are running). Passing this threshold will cause the cluster to be
-   * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+   * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
    * representation of
-   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
    * 
* * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -46,9 +46,9 @@ public interface LifecycleConfigOrBuilder *
    * Optional. The duration to keep the cluster alive while idling (when no jobs
    * are running). Passing this threshold will cause the cluster to be
-   * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+   * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
    * representation of
-   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
    * 
* * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -63,9 +63,9 @@ public interface LifecycleConfigOrBuilder *
    * Optional. The duration to keep the cluster alive while idling (when no jobs
    * are running). Passing this threshold will cause the cluster to be
-   * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+   * deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
    * representation of
-   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
    * 
* * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/MetastoreConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/MetastoreConfig.java new file mode 100644 index 00000000..ca49e495 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/MetastoreConfig.java @@ -0,0 +1,663 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * Specifies a Metastore configuration.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.MetastoreConfig} + */ +public final class MetastoreConfig extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.MetastoreConfig) + MetastoreConfigOrBuilder { + private static final long serialVersionUID = 0L; + // Use MetastoreConfig.newBuilder() to construct. + private MetastoreConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private MetastoreConfig() { + dataprocMetastoreService_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new MetastoreConfig(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private MetastoreConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + dataprocMetastoreService_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public 
static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_MetastoreConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_MetastoreConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.MetastoreConfig.class, + com.google.cloud.dataproc.v1.MetastoreConfig.Builder.class); + } + + public static final int DATAPROC_METASTORE_SERVICE_FIELD_NUMBER = 1; + private volatile java.lang.Object dataprocMetastoreService_; + /** + * + * + *
+   * Required. Resource name of an existing Dataproc Metastore service.
+   * Example:
+   * * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`
+   * 
+ * + * + * string dataproc_metastore_service = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The dataprocMetastoreService. + */ + @java.lang.Override + public java.lang.String getDataprocMetastoreService() { + java.lang.Object ref = dataprocMetastoreService_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + dataprocMetastoreService_ = s; + return s; + } + } + /** + * + * + *
+   * Required. Resource name of an existing Dataproc Metastore service.
+   * Example:
+   * * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`
+   * 
+ * + * + * string dataproc_metastore_service = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for dataprocMetastoreService. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDataprocMetastoreServiceBytes() { + java.lang.Object ref = dataprocMetastoreService_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + dataprocMetastoreService_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getDataprocMetastoreServiceBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, dataprocMetastoreService_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getDataprocMetastoreServiceBytes().isEmpty()) { + size += + com.google.protobuf.GeneratedMessageV3.computeStringSize(1, dataprocMetastoreService_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.MetastoreConfig)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.MetastoreConfig other = + (com.google.cloud.dataproc.v1.MetastoreConfig) obj; + + if (!getDataprocMetastoreService().equals(other.getDataprocMetastoreService())) 
return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATAPROC_METASTORE_SERVICE_FIELD_NUMBER; + hash = (53 * hash) + getDataprocMetastoreService().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.MetastoreConfig parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.MetastoreConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.MetastoreConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.MetastoreConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.MetastoreConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.MetastoreConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.dataproc.v1.MetastoreConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.MetastoreConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.MetastoreConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.MetastoreConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.MetastoreConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.MetastoreConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.MetastoreConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public 
Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Specifies a Metastore configuration.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.MetastoreConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.MetastoreConfig) + com.google.cloud.dataproc.v1.MetastoreConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_MetastoreConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_MetastoreConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.MetastoreConfig.class, + com.google.cloud.dataproc.v1.MetastoreConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.MetastoreConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + dataprocMetastoreService_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_MetastoreConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.MetastoreConfig getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.MetastoreConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.MetastoreConfig build() { 
+ com.google.cloud.dataproc.v1.MetastoreConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.MetastoreConfig buildPartial() { + com.google.cloud.dataproc.v1.MetastoreConfig result = + new com.google.cloud.dataproc.v1.MetastoreConfig(this); + result.dataprocMetastoreService_ = dataprocMetastoreService_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.MetastoreConfig) { + return mergeFrom((com.google.cloud.dataproc.v1.MetastoreConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.MetastoreConfig other) { + if (other == com.google.cloud.dataproc.v1.MetastoreConfig.getDefaultInstance()) return this; + if (!other.getDataprocMetastoreService().isEmpty()) { + dataprocMetastoreService_ = 
other.dataprocMetastoreService_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.MetastoreConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.MetastoreConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object dataprocMetastoreService_ = ""; + /** + * + * + *
+     * Required. Resource name of an existing Dataproc Metastore service.
+     * Example:
+     * * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`
+     * 
+ * + * + * string dataproc_metastore_service = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The dataprocMetastoreService. + */ + public java.lang.String getDataprocMetastoreService() { + java.lang.Object ref = dataprocMetastoreService_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + dataprocMetastoreService_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. Resource name of an existing Dataproc Metastore service.
+     * Example:
+     * * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`
+     * 
+ * + * + * string dataproc_metastore_service = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for dataprocMetastoreService. + */ + public com.google.protobuf.ByteString getDataprocMetastoreServiceBytes() { + java.lang.Object ref = dataprocMetastoreService_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + dataprocMetastoreService_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. Resource name of an existing Dataproc Metastore service.
+     * Example:
+     * * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`
+     * 
+ * + * + * string dataproc_metastore_service = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The dataprocMetastoreService to set. + * @return This builder for chaining. + */ + public Builder setDataprocMetastoreService(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + dataprocMetastoreService_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Resource name of an existing Dataproc Metastore service.
+     * Example:
+     * * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`
+     * 
+ * + * + * string dataproc_metastore_service = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearDataprocMetastoreService() { + + dataprocMetastoreService_ = getDefaultInstance().getDataprocMetastoreService(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Resource name of an existing Dataproc Metastore service.
+     * Example:
+     * * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`
+     * 
+ * + * + * string dataproc_metastore_service = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for dataprocMetastoreService to set. + * @return This builder for chaining. + */ + public Builder setDataprocMetastoreServiceBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + dataprocMetastoreService_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.MetastoreConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.MetastoreConfig) + private static final com.google.cloud.dataproc.v1.MetastoreConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.MetastoreConfig(); + } + + public static com.google.cloud.dataproc.v1.MetastoreConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MetastoreConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MetastoreConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.cloud.dataproc.v1.MetastoreConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/MetastoreConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/MetastoreConfigOrBuilder.java new file mode 100644 index 00000000..c0c1d17b --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/MetastoreConfigOrBuilder.java @@ -0,0 +1,58 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +public interface MetastoreConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.MetastoreConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Resource name of an existing Dataproc Metastore service.
+   * Example:
+   * * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`
+   * 
+ * + * + * string dataproc_metastore_service = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The dataprocMetastoreService. + */ + java.lang.String getDataprocMetastoreService(); + /** + * + * + *
+   * Required. Resource name of an existing Dataproc Metastore service.
+   * Example:
+   * * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`
+   * 
+ * + * + * string dataproc_metastore_service = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for dataprocMetastoreService. + */ + com.google.protobuf.ByteString getDataprocMetastoreServiceBytes(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeGroupAffinity.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeGroupAffinity.java new file mode 100644 index 00000000..0fbcac3b --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeGroupAffinity.java @@ -0,0 +1,683 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * Node Group Affinity for clusters using sole-tenant node groups.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.NodeGroupAffinity} + */ +public final class NodeGroupAffinity extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.NodeGroupAffinity) + NodeGroupAffinityOrBuilder { + private static final long serialVersionUID = 0L; + // Use NodeGroupAffinity.newBuilder() to construct. + private NodeGroupAffinity(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private NodeGroupAffinity() { + nodeGroupUri_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new NodeGroupAffinity(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private NodeGroupAffinity( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + nodeGroupUri_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static 
final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_NodeGroupAffinity_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_NodeGroupAffinity_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.NodeGroupAffinity.class, + com.google.cloud.dataproc.v1.NodeGroupAffinity.Builder.class); + } + + public static final int NODE_GROUP_URI_FIELD_NUMBER = 1; + private volatile java.lang.Object nodeGroupUri_; + /** + * + * + *
+   * Required. The URI of a
+   * sole-tenant [node group
+   * resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)
+   * that the cluster will be created on.
+   * A full URL, partial URI, or node group name are valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+   * * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+   * * `node-group-1`
+   * 
+ * + * string node_group_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The nodeGroupUri. + */ + @java.lang.Override + public java.lang.String getNodeGroupUri() { + java.lang.Object ref = nodeGroupUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nodeGroupUri_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The URI of a
+   * sole-tenant [node group
+   * resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)
+   * that the cluster will be created on.
+   * A full URL, partial URI, or node group name are valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+   * * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+   * * `node-group-1`
+   * 
+ * + * string node_group_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for nodeGroupUri. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNodeGroupUriBytes() { + java.lang.Object ref = nodeGroupUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nodeGroupUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getNodeGroupUriBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, nodeGroupUri_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNodeGroupUriBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, nodeGroupUri_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.NodeGroupAffinity)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.NodeGroupAffinity other = + (com.google.cloud.dataproc.v1.NodeGroupAffinity) obj; + + if (!getNodeGroupUri().equals(other.getNodeGroupUri())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + 
return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NODE_GROUP_URI_FIELD_NUMBER; + hash = (53 * hash) + getNodeGroupUri().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.NodeGroupAffinity parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.NodeGroupAffinity parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.NodeGroupAffinity parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.NodeGroupAffinity parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.NodeGroupAffinity parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.NodeGroupAffinity parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.NodeGroupAffinity parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static 
com.google.cloud.dataproc.v1.NodeGroupAffinity parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.NodeGroupAffinity parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.NodeGroupAffinity parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.NodeGroupAffinity parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.NodeGroupAffinity parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.NodeGroupAffinity prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Node Group Affinity for clusters using sole-tenant node groups.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.NodeGroupAffinity} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.NodeGroupAffinity) + com.google.cloud.dataproc.v1.NodeGroupAffinityOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_NodeGroupAffinity_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_NodeGroupAffinity_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.NodeGroupAffinity.class, + com.google.cloud.dataproc.v1.NodeGroupAffinity.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.NodeGroupAffinity.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + nodeGroupUri_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_NodeGroupAffinity_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.NodeGroupAffinity getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.NodeGroupAffinity.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.cloud.dataproc.v1.NodeGroupAffinity build() { + com.google.cloud.dataproc.v1.NodeGroupAffinity result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.NodeGroupAffinity buildPartial() { + com.google.cloud.dataproc.v1.NodeGroupAffinity result = + new com.google.cloud.dataproc.v1.NodeGroupAffinity(this); + result.nodeGroupUri_ = nodeGroupUri_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.NodeGroupAffinity) { + return mergeFrom((com.google.cloud.dataproc.v1.NodeGroupAffinity) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.NodeGroupAffinity other) { + if (other == com.google.cloud.dataproc.v1.NodeGroupAffinity.getDefaultInstance()) return this; + if 
(!other.getNodeGroupUri().isEmpty()) { + nodeGroupUri_ = other.nodeGroupUri_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.NodeGroupAffinity parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.NodeGroupAffinity) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object nodeGroupUri_ = ""; + /** + * + * + *
+     * Required. The URI of a
+     * sole-tenant [node group
+     * resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)
+     * that the cluster will be created on.
+     * A full URL, partial URI, or node group name are valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+     * * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+     * * `node-group-1`
+     * 
+ * + * string node_group_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The nodeGroupUri. + */ + public java.lang.String getNodeGroupUri() { + java.lang.Object ref = nodeGroupUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nodeGroupUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The URI of a
+     * sole-tenant [node group
+     * resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)
+     * that the cluster will be created on.
+     * A full URL, partial URI, or node group name are valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+     * * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+     * * `node-group-1`
+     * 
+ * + * string node_group_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for nodeGroupUri. + */ + public com.google.protobuf.ByteString getNodeGroupUriBytes() { + java.lang.Object ref = nodeGroupUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nodeGroupUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The URI of a
+     * sole-tenant [node group
+     * resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)
+     * that the cluster will be created on.
+     * A full URL, partial URI, or node group name are valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+     * * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+     * * `node-group-1`
+     * 
+ * + * string node_group_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The nodeGroupUri to set. + * @return This builder for chaining. + */ + public Builder setNodeGroupUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + nodeGroupUri_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The URI of a
+     * sole-tenant [node group
+     * resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)
+     * that the cluster will be created on.
+     * A full URL, partial URI, or node group name are valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+     * * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+     * * `node-group-1`
+     * 
+ * + * string node_group_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearNodeGroupUri() { + + nodeGroupUri_ = getDefaultInstance().getNodeGroupUri(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The URI of a
+     * sole-tenant [node group
+     * resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)
+     * that the cluster will be created on.
+     * A full URL, partial URI, or node group name are valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+     * * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+     * * `node-group-1`
+     * 
+ * + * string node_group_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for nodeGroupUri to set. + * @return This builder for chaining. + */ + public Builder setNodeGroupUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + nodeGroupUri_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.NodeGroupAffinity) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.NodeGroupAffinity) + private static final com.google.cloud.dataproc.v1.NodeGroupAffinity DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.NodeGroupAffinity(); + } + + public static com.google.cloud.dataproc.v1.NodeGroupAffinity getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public NodeGroupAffinity parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new NodeGroupAffinity(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.NodeGroupAffinity getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeGroupAffinityOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeGroupAffinityOrBuilder.java new file mode 100644 index 00000000..5ec58655 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeGroupAffinityOrBuilder.java @@ -0,0 +1,64 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +public interface NodeGroupAffinityOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.NodeGroupAffinity) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The URI of a
+   * sole-tenant [node group
+   * resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)
+   * that the cluster will be created on.
+   * A full URL, partial URI, or node group name are valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+   * * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+   * * `node-group-1`
+   * 
+ * + * string node_group_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The nodeGroupUri. + */ + java.lang.String getNodeGroupUri(); + /** + * + * + *
+   * Required. The URI of a
+   * sole-tenant [node group
+   * resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)
+   * that the cluster will be created on.
+   * A full URL, partial URI, or node group name are valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+   * * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
+   * * `node-group-1`
+   * 
+ * + * string node_group_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for nodeGroupUri. + */ + com.google.protobuf.ByteString getNodeGroupUriBytes(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java index 959abd9f..f9103c2a 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java @@ -369,8 +369,8 @@ public JobTypeCase getJobTypeCase() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -400,8 +400,8 @@ public java.lang.String getStepId() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1797,8 +1797,8 @@ public Builder clearJobType() { * within the template. 
* The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1827,8 +1827,8 @@ public java.lang.String getStepId() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1857,8 +1857,8 @@ public com.google.protobuf.ByteString getStepIdBytes() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1886,8 +1886,8 @@ public Builder setStepId(java.lang.String value) { * within the template. 
* The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1911,8 +1911,8 @@ public Builder clearStepId() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java index 198e4fca..1b61d806 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java @@ -31,8 +31,8 @@ public interface OrderedJobOrBuilder * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. 
* The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -51,8 +51,8 @@ public interface OrderedJobOrBuilder * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/QueryList.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/QueryList.java index de0b236c..1f47fc8a 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/QueryList.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/QueryList.java @@ -124,9 +124,9 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Required. The queries to execute. You do not need to terminate a query
-   * with a semicolon. Multiple queries can be specified in one string
-   * by separating each with a semicolon. Here is an example of an Cloud
+   * Required. The queries to execute. You do not need to end a query expression
+   * with a semicolon. Multiple queries can be specified in one
+   * string by separating each with a semicolon. Here is an example of a
    * Dataproc API snippet that uses a QueryList to specify a HiveJob:
    *     "hiveJob": {
    *       "queryList": {
@@ -150,9 +150,9 @@ public com.google.protobuf.ProtocolStringList getQueriesList() {
    *
    *
    * 
-   * Required. The queries to execute. You do not need to terminate a query
-   * with a semicolon. Multiple queries can be specified in one string
-   * by separating each with a semicolon. Here is an example of an Cloud
+   * Required. The queries to execute. You do not need to end a query expression
+   * with a semicolon. Multiple queries can be specified in one
+   * string by separating each with a semicolon. Here is an example of a
    * Dataproc API snippet that uses a QueryList to specify a HiveJob:
    *     "hiveJob": {
    *       "queryList": {
@@ -176,9 +176,9 @@ public int getQueriesCount() {
    *
    *
    * 
-   * Required. The queries to execute. You do not need to terminate a query
-   * with a semicolon. Multiple queries can be specified in one string
-   * by separating each with a semicolon. Here is an example of an Cloud
+   * Required. The queries to execute. You do not need to end a query expression
+   * with a semicolon. Multiple queries can be specified in one
+   * string by separating each with a semicolon. Here is an example of a
    * Dataproc API snippet that uses a QueryList to specify a HiveJob:
    *     "hiveJob": {
    *       "queryList": {
@@ -203,9 +203,9 @@ public java.lang.String getQueries(int index) {
    *
    *
    * 
-   * Required. The queries to execute. You do not need to terminate a query
-   * with a semicolon. Multiple queries can be specified in one string
-   * by separating each with a semicolon. Here is an example of an Cloud
+   * Required. The queries to execute. You do not need to end a query expression
+   * with a semicolon. Multiple queries can be specified in one
+   * string by separating each with a semicolon. Here is an example of a
    * Dataproc API snippet that uses a QueryList to specify a HiveJob:
    *     "hiveJob": {
    *       "queryList": {
@@ -575,9 +575,9 @@ private void ensureQueriesIsMutable() {
      *
      *
      * 
-     * Required. The queries to execute. You do not need to terminate a query
-     * with a semicolon. Multiple queries can be specified in one string
-     * by separating each with a semicolon. Here is an example of an Cloud
+     * Required. The queries to execute. You do not need to end a query expression
+     * with a semicolon. Multiple queries can be specified in one
+     * string by separating each with a semicolon. Here is an example of a
      * Dataproc API snippet that uses a QueryList to specify a HiveJob:
      *     "hiveJob": {
      *       "queryList": {
@@ -601,9 +601,9 @@ public com.google.protobuf.ProtocolStringList getQueriesList() {
      *
      *
      * 
-     * Required. The queries to execute. You do not need to terminate a query
-     * with a semicolon. Multiple queries can be specified in one string
-     * by separating each with a semicolon. Here is an example of an Cloud
+     * Required. The queries to execute. You do not need to end a query expression
+     * with a semicolon. Multiple queries can be specified in one
+     * string by separating each with a semicolon. Here is an example of a
      * Dataproc API snippet that uses a QueryList to specify a HiveJob:
      *     "hiveJob": {
      *       "queryList": {
@@ -627,9 +627,9 @@ public int getQueriesCount() {
      *
      *
      * 
-     * Required. The queries to execute. You do not need to terminate a query
-     * with a semicolon. Multiple queries can be specified in one string
-     * by separating each with a semicolon. Here is an example of an Cloud
+     * Required. The queries to execute. You do not need to end a query expression
+     * with a semicolon. Multiple queries can be specified in one
+     * string by separating each with a semicolon. Here is an example of a
      * Dataproc API snippet that uses a QueryList to specify a HiveJob:
      *     "hiveJob": {
      *       "queryList": {
@@ -654,9 +654,9 @@ public java.lang.String getQueries(int index) {
      *
      *
      * 
-     * Required. The queries to execute. You do not need to terminate a query
-     * with a semicolon. Multiple queries can be specified in one string
-     * by separating each with a semicolon. Here is an example of an Cloud
+     * Required. The queries to execute. You do not need to end a query expression
+     * with a semicolon. Multiple queries can be specified in one
+     * string by separating each with a semicolon. Here is an example of a
      * Dataproc API snippet that uses a QueryList to specify a HiveJob:
      *     "hiveJob": {
      *       "queryList": {
@@ -681,9 +681,9 @@ public com.google.protobuf.ByteString getQueriesBytes(int index) {
      *
      *
      * 
-     * Required. The queries to execute. You do not need to terminate a query
-     * with a semicolon. Multiple queries can be specified in one string
-     * by separating each with a semicolon. Here is an example of an Cloud
+     * Required. The queries to execute. You do not need to end a query expression
+     * with a semicolon. Multiple queries can be specified in one
+     * string by separating each with a semicolon. Here is an example of a
      * Dataproc API snippet that uses a QueryList to specify a HiveJob:
      *     "hiveJob": {
      *       "queryList": {
@@ -715,9 +715,9 @@ public Builder setQueries(int index, java.lang.String value) {
      *
      *
      * 
-     * Required. The queries to execute. You do not need to terminate a query
-     * with a semicolon. Multiple queries can be specified in one string
-     * by separating each with a semicolon. Here is an example of an Cloud
+     * Required. The queries to execute. You do not need to end a query expression
+     * with a semicolon. Multiple queries can be specified in one
+     * string by separating each with a semicolon. Here is an example of a
      * Dataproc API snippet that uses a QueryList to specify a HiveJob:
      *     "hiveJob": {
      *       "queryList": {
@@ -748,9 +748,9 @@ public Builder addQueries(java.lang.String value) {
      *
      *
      * 
-     * Required. The queries to execute. You do not need to terminate a query
-     * with a semicolon. Multiple queries can be specified in one string
-     * by separating each with a semicolon. Here is an example of an Cloud
+     * Required. The queries to execute. You do not need to end a query expression
+     * with a semicolon. Multiple queries can be specified in one
+     * string by separating each with a semicolon. Here is an example of a
      * Dataproc API snippet that uses a QueryList to specify a HiveJob:
      *     "hiveJob": {
      *       "queryList": {
@@ -778,9 +778,9 @@ public Builder addAllQueries(java.lang.Iterable values) {
      *
      *
      * 
-     * Required. The queries to execute. You do not need to terminate a query
-     * with a semicolon. Multiple queries can be specified in one string
-     * by separating each with a semicolon. Here is an example of an Cloud
+     * Required. The queries to execute. You do not need to end a query expression
+     * with a semicolon. Multiple queries can be specified in one
+     * string by separating each with a semicolon. Here is an example of a
      * Dataproc API snippet that uses a QueryList to specify a HiveJob:
      *     "hiveJob": {
      *       "queryList": {
@@ -807,9 +807,9 @@ public Builder clearQueries() {
      *
      *
      * 
-     * Required. The queries to execute. You do not need to terminate a query
-     * with a semicolon. Multiple queries can be specified in one string
-     * by separating each with a semicolon. Here is an example of an Cloud
+     * Required. The queries to execute. You do not need to end a query expression
+     * with a semicolon. Multiple queries can be specified in one
+     * string by separating each with a semicolon. Here is an example of a
      * Dataproc API snippet that uses a QueryList to specify a HiveJob:
      *     "hiveJob": {
      *       "queryList": {
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/QueryListOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/QueryListOrBuilder.java
index 568b46aa..05bb09a0 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/QueryListOrBuilder.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/QueryListOrBuilder.java
@@ -27,9 +27,9 @@ public interface QueryListOrBuilder
    *
    *
    * 
-   * Required. The queries to execute. You do not need to terminate a query
-   * with a semicolon. Multiple queries can be specified in one string
-   * by separating each with a semicolon. Here is an example of an Cloud
+   * Required. The queries to execute. You do not need to end a query expression
+   * with a semicolon. Multiple queries can be specified in one
+   * string by separating each with a semicolon. Here is an example of a
    * Dataproc API snippet that uses a QueryList to specify a HiveJob:
    *     "hiveJob": {
    *       "queryList": {
@@ -51,9 +51,9 @@ public interface QueryListOrBuilder
    *
    *
    * 
-   * Required. The queries to execute. You do not need to terminate a query
-   * with a semicolon. Multiple queries can be specified in one string
-   * by separating each with a semicolon. Here is an example of an Cloud
+   * Required. The queries to execute. You do not need to end a query expression
+   * with a semicolon. Multiple queries can be specified in one
+   * string by separating each with a semicolon. Here is an example of a
    * Dataproc API snippet that uses a QueryList to specify a HiveJob:
    *     "hiveJob": {
    *       "queryList": {
@@ -75,9 +75,9 @@ public interface QueryListOrBuilder
    *
    *
    * 
-   * Required. The queries to execute. You do not need to terminate a query
-   * with a semicolon. Multiple queries can be specified in one string
-   * by separating each with a semicolon. Here is an example of an Cloud
+   * Required. The queries to execute. You do not need to end a query expression
+   * with a semicolon. Multiple queries can be specified in one
+   * string by separating each with a semicolon. Here is an example of a
    * Dataproc API snippet that uses a QueryList to specify a HiveJob:
    *     "hiveJob": {
    *       "queryList": {
@@ -100,9 +100,9 @@ public interface QueryListOrBuilder
    *
    *
    * 
-   * Required. The queries to execute. You do not need to terminate a query
-   * with a semicolon. Multiple queries can be specified in one string
-   * by separating each with a semicolon. Here is an example of an Cloud
+   * Required. The queries to execute. You do not need to end a query expression
+   * with a semicolon. Multiple queries can be specified in one
+   * string by separating each with a semicolon. Here is an example of a
    * Dataproc API snippet that uses a QueryList to specify a HiveJob:
    *     "hiveJob": {
    *       "queryList": {
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SecurityConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SecurityConfig.java
index dade0e6c..1cf38ac7 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SecurityConfig.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SecurityConfig.java
@@ -22,7 +22,7 @@
  *
  *
  * 
- * Security related configuration, including Kerberos.
+ * Security related configuration, including encryption, Kerberos, etc.
  * 
* * Protobuf type {@code google.cloud.dataproc.v1.SecurityConfig} @@ -82,6 +82,22 @@ private SecurityConfig( kerberosConfig_ = subBuilder.buildPartial(); } + break; + } + case 18: + { + com.google.cloud.dataproc.v1.IdentityConfig.Builder subBuilder = null; + if (identityConfig_ != null) { + subBuilder = identityConfig_.toBuilder(); + } + identityConfig_ = + input.readMessage( + com.google.cloud.dataproc.v1.IdentityConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(identityConfig_); + identityConfig_ = subBuilder.buildPartial(); + } + break; } default: @@ -124,10 +140,12 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Kerberos related configuration.
+   * Optional. Kerberos related configuration.
    * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the kerberosConfig field is set. */ @@ -139,10 +157,12 @@ public boolean hasKerberosConfig() { * * *
-   * Kerberos related configuration.
+   * Optional. Kerberos related configuration.
    * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The kerberosConfig. */ @@ -156,16 +176,75 @@ public com.google.cloud.dataproc.v1.KerberosConfig getKerberosConfig() { * * *
-   * Kerberos related configuration.
+   * Optional. Kerberos related configuration.
    * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * */ @java.lang.Override public com.google.cloud.dataproc.v1.KerberosConfigOrBuilder getKerberosConfigOrBuilder() { return getKerberosConfig(); } + public static final int IDENTITY_CONFIG_FIELD_NUMBER = 2; + private com.google.cloud.dataproc.v1.IdentityConfig identityConfig_; + /** + * + * + *
+   * Optional. Identity related configuration, including service account based
+   * secure multi-tenancy user mappings.
+   * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the identityConfig field is set. + */ + @java.lang.Override + public boolean hasIdentityConfig() { + return identityConfig_ != null; + } + /** + * + * + *
+   * Optional. Identity related configuration, including service account based
+   * secure multi-tenancy user mappings.
+   * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The identityConfig. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.IdentityConfig getIdentityConfig() { + return identityConfig_ == null + ? com.google.cloud.dataproc.v1.IdentityConfig.getDefaultInstance() + : identityConfig_; + } + /** + * + * + *
+   * Optional. Identity related configuration, including service account based
+   * secure multi-tenancy user mappings.
+   * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.IdentityConfigOrBuilder getIdentityConfigOrBuilder() { + return getIdentityConfig(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -183,6 +262,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (kerberosConfig_ != null) { output.writeMessage(1, getKerberosConfig()); } + if (identityConfig_ != null) { + output.writeMessage(2, getIdentityConfig()); + } unknownFields.writeTo(output); } @@ -195,6 +277,9 @@ public int getSerializedSize() { if (kerberosConfig_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getKerberosConfig()); } + if (identityConfig_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getIdentityConfig()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -215,6 +300,10 @@ public boolean equals(final java.lang.Object obj) { if (hasKerberosConfig()) { if (!getKerberosConfig().equals(other.getKerberosConfig())) return false; } + if (hasIdentityConfig() != other.hasIdentityConfig()) return false; + if (hasIdentityConfig()) { + if (!getIdentityConfig().equals(other.getIdentityConfig())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -230,6 +319,10 @@ public int hashCode() { hash = (37 * hash) + KERBEROS_CONFIG_FIELD_NUMBER; hash = (53 * hash) + getKerberosConfig().hashCode(); } + if (hasIdentityConfig()) { + hash = (37 * hash) + IDENTITY_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getIdentityConfig().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -334,7 +427,7 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
-   * Security related configuration, including Kerberos.
+   * Security related configuration, including encryption, Kerberos, etc.
    * 
* * Protobuf type {@code google.cloud.dataproc.v1.SecurityConfig} @@ -381,6 +474,12 @@ public Builder clear() { kerberosConfig_ = null; kerberosConfigBuilder_ = null; } + if (identityConfigBuilder_ == null) { + identityConfig_ = null; + } else { + identityConfig_ = null; + identityConfigBuilder_ = null; + } return this; } @@ -413,6 +512,11 @@ public com.google.cloud.dataproc.v1.SecurityConfig buildPartial() { } else { result.kerberosConfig_ = kerberosConfigBuilder_.build(); } + if (identityConfigBuilder_ == null) { + result.identityConfig_ = identityConfig_; + } else { + result.identityConfig_ = identityConfigBuilder_.build(); + } onBuilt(); return result; } @@ -465,6 +569,9 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.SecurityConfig other) { if (other.hasKerberosConfig()) { mergeKerberosConfig(other.getKerberosConfig()); } + if (other.hasIdentityConfig()) { + mergeIdentityConfig(other.getIdentityConfig()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -504,10 +611,12 @@ public Builder mergeFrom( * * *
-     * Kerberos related configuration.
+     * Optional. Kerberos related configuration.
      * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the kerberosConfig field is set. */ @@ -518,10 +627,12 @@ public boolean hasKerberosConfig() { * * *
-     * Kerberos related configuration.
+     * Optional. Kerberos related configuration.
      * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The kerberosConfig. */ @@ -538,10 +649,12 @@ public com.google.cloud.dataproc.v1.KerberosConfig getKerberosConfig() { * * *
-     * Kerberos related configuration.
+     * Optional. Kerberos related configuration.
      * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder setKerberosConfig(com.google.cloud.dataproc.v1.KerberosConfig value) { if (kerberosConfigBuilder_ == null) { @@ -560,10 +673,12 @@ public Builder setKerberosConfig(com.google.cloud.dataproc.v1.KerberosConfig val * * *
-     * Kerberos related configuration.
+     * Optional. Kerberos related configuration.
      * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder setKerberosConfig( com.google.cloud.dataproc.v1.KerberosConfig.Builder builderForValue) { @@ -580,10 +695,12 @@ public Builder setKerberosConfig( * * *
-     * Kerberos related configuration.
+     * Optional. Kerberos related configuration.
      * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder mergeKerberosConfig(com.google.cloud.dataproc.v1.KerberosConfig value) { if (kerberosConfigBuilder_ == null) { @@ -606,10 +723,12 @@ public Builder mergeKerberosConfig(com.google.cloud.dataproc.v1.KerberosConfig v * * *
-     * Kerberos related configuration.
+     * Optional. Kerberos related configuration.
      * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder clearKerberosConfig() { if (kerberosConfigBuilder_ == null) { @@ -626,10 +745,12 @@ public Builder clearKerberosConfig() { * * *
-     * Kerberos related configuration.
+     * Optional. Kerberos related configuration.
      * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.KerberosConfig.Builder getKerberosConfigBuilder() { @@ -640,10 +761,12 @@ public com.google.cloud.dataproc.v1.KerberosConfig.Builder getKerberosConfigBuil * * *
-     * Kerberos related configuration.
+     * Optional. Kerberos related configuration.
      * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.KerberosConfigOrBuilder getKerberosConfigOrBuilder() { if (kerberosConfigBuilder_ != null) { @@ -658,10 +781,12 @@ public com.google.cloud.dataproc.v1.KerberosConfigOrBuilder getKerberosConfigOrB * * *
-     * Kerberos related configuration.
+     * Optional. Kerberos related configuration.
      * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.KerberosConfig, @@ -680,6 +805,219 @@ public com.google.cloud.dataproc.v1.KerberosConfigOrBuilder getKerberosConfigOrB return kerberosConfigBuilder_; } + private com.google.cloud.dataproc.v1.IdentityConfig identityConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.IdentityConfig, + com.google.cloud.dataproc.v1.IdentityConfig.Builder, + com.google.cloud.dataproc.v1.IdentityConfigOrBuilder> + identityConfigBuilder_; + /** + * + * + *
+     * Optional. Identity related configuration, including service account based
+     * secure multi-tenancy user mappings.
+     * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the identityConfig field is set. + */ + public boolean hasIdentityConfig() { + return identityConfigBuilder_ != null || identityConfig_ != null; + } + /** + * + * + *
+     * Optional. Identity related configuration, including service account based
+     * secure multi-tenancy user mappings.
+     * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The identityConfig. + */ + public com.google.cloud.dataproc.v1.IdentityConfig getIdentityConfig() { + if (identityConfigBuilder_ == null) { + return identityConfig_ == null + ? com.google.cloud.dataproc.v1.IdentityConfig.getDefaultInstance() + : identityConfig_; + } else { + return identityConfigBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. Identity related configuration, including service account based
+     * secure multi-tenancy user mappings.
+     * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setIdentityConfig(com.google.cloud.dataproc.v1.IdentityConfig value) { + if (identityConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + identityConfig_ = value; + onChanged(); + } else { + identityConfigBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Identity related configuration, including service account based
+     * secure multi-tenancy user mappings.
+     * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setIdentityConfig( + com.google.cloud.dataproc.v1.IdentityConfig.Builder builderForValue) { + if (identityConfigBuilder_ == null) { + identityConfig_ = builderForValue.build(); + onChanged(); + } else { + identityConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. Identity related configuration, including service account based
+     * secure multi-tenancy user mappings.
+     * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeIdentityConfig(com.google.cloud.dataproc.v1.IdentityConfig value) { + if (identityConfigBuilder_ == null) { + if (identityConfig_ != null) { + identityConfig_ = + com.google.cloud.dataproc.v1.IdentityConfig.newBuilder(identityConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + identityConfig_ = value; + } + onChanged(); + } else { + identityConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Identity related configuration, including service account based
+     * secure multi-tenancy user mappings.
+     * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearIdentityConfig() { + if (identityConfigBuilder_ == null) { + identityConfig_ = null; + onChanged(); + } else { + identityConfig_ = null; + identityConfigBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. Identity related configuration, including service account based
+     * secure multi-tenancy user mappings.
+     * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.IdentityConfig.Builder getIdentityConfigBuilder() { + + onChanged(); + return getIdentityConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Identity related configuration, including service account based
+     * secure multi-tenancy user mappings.
+     * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.IdentityConfigOrBuilder getIdentityConfigOrBuilder() { + if (identityConfigBuilder_ != null) { + return identityConfigBuilder_.getMessageOrBuilder(); + } else { + return identityConfig_ == null + ? com.google.cloud.dataproc.v1.IdentityConfig.getDefaultInstance() + : identityConfig_; + } + } + /** + * + * + *
+     * Optional. Identity related configuration, including service account based
+     * secure multi-tenancy user mappings.
+     * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.IdentityConfig, + com.google.cloud.dataproc.v1.IdentityConfig.Builder, + com.google.cloud.dataproc.v1.IdentityConfigOrBuilder> + getIdentityConfigFieldBuilder() { + if (identityConfigBuilder_ == null) { + identityConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.IdentityConfig, + com.google.cloud.dataproc.v1.IdentityConfig.Builder, + com.google.cloud.dataproc.v1.IdentityConfigOrBuilder>( + getIdentityConfig(), getParentForChildren(), isClean()); + identityConfig_ = null; + } + return identityConfigBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SecurityConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SecurityConfigOrBuilder.java index c0b788a2..15cabbf9 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SecurityConfigOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SecurityConfigOrBuilder.java @@ -27,10 +27,12 @@ public interface SecurityConfigOrBuilder * * *
-   * Kerberos related configuration.
+   * Optional. Kerberos related configuration.
    * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the kerberosConfig field is set. */ @@ -39,10 +41,12 @@ public interface SecurityConfigOrBuilder * * *
-   * Kerberos related configuration.
+   * Optional. Kerberos related configuration.
    * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The kerberosConfig. */ @@ -51,10 +55,56 @@ public interface SecurityConfigOrBuilder * * *
-   * Kerberos related configuration.
+   * Optional. Kerberos related configuration.
    * 
* - * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1; + * + * .google.cloud.dataproc.v1.KerberosConfig kerberos_config = 1 [(.google.api.field_behavior) = OPTIONAL]; + * */ com.google.cloud.dataproc.v1.KerberosConfigOrBuilder getKerberosConfigOrBuilder(); + + /** + * + * + *
+   * Optional. Identity related configuration, including service account based
+   * secure multi-tenancy user mappings.
+   * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the identityConfig field is set. + */ + boolean hasIdentityConfig(); + /** + * + * + *
+   * Optional. Identity related configuration, including service account based
+   * secure multi-tenancy user mappings.
+   * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The identityConfig. + */ + com.google.cloud.dataproc.v1.IdentityConfig getIdentityConfig(); + /** + * + * + *
+   * Optional. Identity related configuration, including service account based
+   * secure multi-tenancy user mappings.
+   * 
+ * + * + * .google.cloud.dataproc.v1.IdentityConfig identity_config = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.IdentityConfigOrBuilder getIdentityConfigOrBuilder(); } diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ServiceName.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ServiceName.java new file mode 100644 index 00000000..e5853518 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ServiceName.java @@ -0,0 +1,223 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.dataproc.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class ServiceName implements ResourceName { + private static final PathTemplate PROJECT_LOCATION_SERVICE = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/locations/{location}/services/{service}"); + private volatile Map fieldValuesMap; + private final String project; + private final String location; + private final String service; + + @Deprecated + protected ServiceName() { + project = null; + location = null; + service = null; + } + + private ServiceName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + location = Preconditions.checkNotNull(builder.getLocation()); + service = Preconditions.checkNotNull(builder.getService()); + } + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getService() { + return service; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static ServiceName of(String project, String location, String service) { + return newBuilder().setProject(project).setLocation(location).setService(service).build(); + } + + public static String format(String project, String location, String service) { + return newBuilder() + .setProject(project) + .setLocation(location) + .setService(service) + .build() + .toString(); + } + + public static ServiceName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_LOCATION_SERVICE.validatedMatch( + formattedString, "ServiceName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("location"), matchMap.get("service")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + 
public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (ServiceName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_LOCATION_SERVICE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (location != null) { + fieldMapBuilder.put("location", location); + } + if (service != null) { + fieldMapBuilder.put("service", service); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_LOCATION_SERVICE.instantiate( + "project", project, "location", location, "service", service); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null || getClass() == o.getClass()) { + ServiceName that = ((ServiceName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.location, that.location) + && Objects.equals(this.service, that.service); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(location); + h *= 1000003; + h ^= Objects.hashCode(service); + return h; + } + + /** Builder for projects/{project}/locations/{location}/services/{service}. 
*/ + public static class Builder { + private String project; + private String location; + private String service; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getService() { + return service; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setLocation(String location) { + this.location = location; + return this; + } + + public Builder setService(String service) { + this.service = service; + return this; + } + + private Builder(ServiceName serviceName) { + project = serviceName.project; + location = serviceName.location; + service = serviceName.service; + } + + public ServiceName build() { + return new ServiceName(this); + } + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SharedProto.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SharedProto.java index 923a4367..758072b2 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SharedProto.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SharedProto.java @@ -37,21 +37,26 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { java.lang.String[] descriptorData = { "\n%google/cloud/dataproc/v1/shared.proto\022" + "\030google.cloud.dataproc.v1\032\034google/api/an" - + "notations.proto*|\n\tComponent\022\031\n\025COMPONEN" - + "T_UNSPECIFIED\020\000\022\014\n\010ANACONDA\020\005\022\020\n\014HIVE_WE" - + "BHCAT\020\003\022\013\n\007JUPYTER\020\001\022\n\n\006PRESTO\020\006\022\014\n\010ZEPP" - + "ELIN\020\004\022\r\n\tZOOKEEPER\020\010Bo\n\034com.google.clou" - + "d.dataproc.v1B\013SharedProtoP\001Z@google.gol" - + "ang.org/genproto/googleapis/cloud/datapr" - + "oc/v1;dataprocb\006proto3" + + "notations.proto\032\037google/api/field_behavi" + + 
"or.proto*\277\001\n\tComponent\022\031\n\025COMPONENT_UNSP" + + "ECIFIED\020\000\022\014\n\010ANACONDA\020\005\022\n\n\006DOCKER\020\r\022\t\n\005D" + + "RUID\020\t\022\t\n\005FLINK\020\016\022\t\n\005HBASE\020\013\022\020\n\014HIVE_WEB" + + "HCAT\020\003\022\013\n\007JUPYTER\020\001\022\n\n\006PRESTO\020\006\022\n\n\006RANGE" + + "R\020\014\022\010\n\004SOLR\020\n\022\014\n\010ZEPPELIN\020\004\022\r\n\tZOOKEEPER" + + "\020\010Bo\n\034com.google.cloud.dataproc.v1B\013Shar" + + "edProtoP\001Z@google.golang.org/genproto/go" + + "ogleapis/cloud/dataproc/v1;dataprocb\006pro" + + "to3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), }); com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); } // @@protoc_insertion_point(outer_class_scope) diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ShieldedInstanceConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ShieldedInstanceConfig.java new file mode 100644 index 00000000..01d8b14f --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ShieldedInstanceConfig.java @@ -0,0 +1,728 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * Shielded Instance Config for clusters using [Compute Engine Shielded
+ * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.ShieldedInstanceConfig} + */ +public final class ShieldedInstanceConfig extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.ShieldedInstanceConfig) + ShieldedInstanceConfigOrBuilder { + private static final long serialVersionUID = 0L; + // Use ShieldedInstanceConfig.newBuilder() to construct. + private ShieldedInstanceConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ShieldedInstanceConfig() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ShieldedInstanceConfig(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ShieldedInstanceConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + enableSecureBoot_ = input.readBool(); + break; + } + case 16: + { + enableVtpm_ = input.readBool(); + break; + } + case 24: + { + enableIntegrityMonitoring_ = input.readBool(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_ShieldedInstanceConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_ShieldedInstanceConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.ShieldedInstanceConfig.class, + com.google.cloud.dataproc.v1.ShieldedInstanceConfig.Builder.class); + } + + public static final int ENABLE_SECURE_BOOT_FIELD_NUMBER = 1; + private boolean enableSecureBoot_; + /** + * + * + *
+   * Optional. Defines whether instances have Secure Boot enabled.
+   * 
+ * + * bool enable_secure_boot = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enableSecureBoot. + */ + @java.lang.Override + public boolean getEnableSecureBoot() { + return enableSecureBoot_; + } + + public static final int ENABLE_VTPM_FIELD_NUMBER = 2; + private boolean enableVtpm_; + /** + * + * + *
+   * Optional. Defines whether instances have the vTPM enabled.
+   * 
+ * + * bool enable_vtpm = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enableVtpm. + */ + @java.lang.Override + public boolean getEnableVtpm() { + return enableVtpm_; + } + + public static final int ENABLE_INTEGRITY_MONITORING_FIELD_NUMBER = 3; + private boolean enableIntegrityMonitoring_; + /** + * + * + *
+   * Optional. Defines whether instances have integrity monitoring enabled.
+   * 
+ * + * bool enable_integrity_monitoring = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enableIntegrityMonitoring. + */ + @java.lang.Override + public boolean getEnableIntegrityMonitoring() { + return enableIntegrityMonitoring_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (enableSecureBoot_ != false) { + output.writeBool(1, enableSecureBoot_); + } + if (enableVtpm_ != false) { + output.writeBool(2, enableVtpm_); + } + if (enableIntegrityMonitoring_ != false) { + output.writeBool(3, enableIntegrityMonitoring_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (enableSecureBoot_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, enableSecureBoot_); + } + if (enableVtpm_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, enableVtpm_); + } + if (enableIntegrityMonitoring_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, enableIntegrityMonitoring_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.ShieldedInstanceConfig)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.ShieldedInstanceConfig other = + (com.google.cloud.dataproc.v1.ShieldedInstanceConfig) obj; + + if (getEnableSecureBoot() != other.getEnableSecureBoot()) return false; + if 
(getEnableVtpm() != other.getEnableVtpm()) return false; + if (getEnableIntegrityMonitoring() != other.getEnableIntegrityMonitoring()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENABLE_SECURE_BOOT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableSecureBoot()); + hash = (37 * hash) + ENABLE_VTPM_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableVtpm()); + hash = (37 * hash) + ENABLE_INTEGRITY_MONITORING_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableIntegrityMonitoring()); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.ShieldedInstanceConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.ShieldedInstanceConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ShieldedInstanceConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.ShieldedInstanceConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.dataproc.v1.ShieldedInstanceConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.ShieldedInstanceConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ShieldedInstanceConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.ShieldedInstanceConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ShieldedInstanceConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.ShieldedInstanceConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ShieldedInstanceConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.ShieldedInstanceConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.ShieldedInstanceConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Shielded Instance Config for clusters using [Compute Engine Shielded
+   * VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.ShieldedInstanceConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.ShieldedInstanceConfig) + com.google.cloud.dataproc.v1.ShieldedInstanceConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_ShieldedInstanceConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_ShieldedInstanceConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.ShieldedInstanceConfig.class, + com.google.cloud.dataproc.v1.ShieldedInstanceConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.ShieldedInstanceConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + enableSecureBoot_ = false; + + enableVtpm_ = false; + + enableIntegrityMonitoring_ = false; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_ShieldedInstanceConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ShieldedInstanceConfig getDefaultInstanceForType() { + return 
com.google.cloud.dataproc.v1.ShieldedInstanceConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ShieldedInstanceConfig build() { + com.google.cloud.dataproc.v1.ShieldedInstanceConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ShieldedInstanceConfig buildPartial() { + com.google.cloud.dataproc.v1.ShieldedInstanceConfig result = + new com.google.cloud.dataproc.v1.ShieldedInstanceConfig(this); + result.enableSecureBoot_ = enableSecureBoot_; + result.enableVtpm_ = enableVtpm_; + result.enableIntegrityMonitoring_ = enableIntegrityMonitoring_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.ShieldedInstanceConfig) { + return mergeFrom((com.google.cloud.dataproc.v1.ShieldedInstanceConfig) other); + } else { + 
super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.ShieldedInstanceConfig other) { + if (other == com.google.cloud.dataproc.v1.ShieldedInstanceConfig.getDefaultInstance()) + return this; + if (other.getEnableSecureBoot() != false) { + setEnableSecureBoot(other.getEnableSecureBoot()); + } + if (other.getEnableVtpm() != false) { + setEnableVtpm(other.getEnableVtpm()); + } + if (other.getEnableIntegrityMonitoring() != false) { + setEnableIntegrityMonitoring(other.getEnableIntegrityMonitoring()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.ShieldedInstanceConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.dataproc.v1.ShieldedInstanceConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private boolean enableSecureBoot_; + /** + * + * + *
+     * Optional. Defines whether instances have Secure Boot enabled.
+     * 
+ * + * bool enable_secure_boot = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enableSecureBoot. + */ + @java.lang.Override + public boolean getEnableSecureBoot() { + return enableSecureBoot_; + } + /** + * + * + *
+     * Optional. Defines whether instances have Secure Boot enabled.
+     * 
+ * + * bool enable_secure_boot = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The enableSecureBoot to set. + * @return This builder for chaining. + */ + public Builder setEnableSecureBoot(boolean value) { + + enableSecureBoot_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Defines whether instances have Secure Boot enabled.
+     * 
+ * + * bool enable_secure_boot = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEnableSecureBoot() { + + enableSecureBoot_ = false; + onChanged(); + return this; + } + + private boolean enableVtpm_; + /** + * + * + *
+     * Optional. Defines whether instances have the vTPM enabled.
+     * 
+ * + * bool enable_vtpm = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enableVtpm. + */ + @java.lang.Override + public boolean getEnableVtpm() { + return enableVtpm_; + } + /** + * + * + *
+     * Optional. Defines whether instances have the vTPM enabled.
+     * 
+ * + * bool enable_vtpm = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The enableVtpm to set. + * @return This builder for chaining. + */ + public Builder setEnableVtpm(boolean value) { + + enableVtpm_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Defines whether instances have the vTPM enabled.
+     * 
+ * + * bool enable_vtpm = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEnableVtpm() { + + enableVtpm_ = false; + onChanged(); + return this; + } + + private boolean enableIntegrityMonitoring_; + /** + * + * + *
+     * Optional. Defines whether instances have integrity monitoring enabled.
+     * 
+ * + * bool enable_integrity_monitoring = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enableIntegrityMonitoring. + */ + @java.lang.Override + public boolean getEnableIntegrityMonitoring() { + return enableIntegrityMonitoring_; + } + /** + * + * + *
+     * Optional. Defines whether instances have integrity monitoring enabled.
+     * 
+ * + * bool enable_integrity_monitoring = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The enableIntegrityMonitoring to set. + * @return This builder for chaining. + */ + public Builder setEnableIntegrityMonitoring(boolean value) { + + enableIntegrityMonitoring_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Defines whether instances have integrity monitoring enabled.
+     * 
+ * + * bool enable_integrity_monitoring = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearEnableIntegrityMonitoring() { + + enableIntegrityMonitoring_ = false; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.ShieldedInstanceConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ShieldedInstanceConfig) + private static final com.google.cloud.dataproc.v1.ShieldedInstanceConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.ShieldedInstanceConfig(); + } + + public static com.google.cloud.dataproc.v1.ShieldedInstanceConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ShieldedInstanceConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ShieldedInstanceConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ShieldedInstanceConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ShieldedInstanceConfigOrBuilder.java 
b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ShieldedInstanceConfigOrBuilder.java new file mode 100644 index 00000000..26e0a138 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ShieldedInstanceConfigOrBuilder.java @@ -0,0 +1,64 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +public interface ShieldedInstanceConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.ShieldedInstanceConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. Defines whether instances have Secure Boot enabled.
+   * 
+ * + * bool enable_secure_boot = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enableSecureBoot. + */ + boolean getEnableSecureBoot(); + + /** + * + * + *
+   * Optional. Defines whether instances have the vTPM enabled.
+   * 
+ * + * bool enable_vtpm = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enableVtpm. + */ + boolean getEnableVtpm(); + + /** + * + * + *
+   * Optional. Defines whether instances have integrity monitoring enabled.
+   * 
+ * + * bool enable_integrity_monitoring = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The enableIntegrityMonitoring. + */ + boolean getEnableIntegrityMonitoring(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StartClusterRequest.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StartClusterRequest.java new file mode 100644 index 00000000..e2fae913 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StartClusterRequest.java @@ -0,0 +1,1428 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A request to start a cluster.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.StartClusterRequest} + */ +public final class StartClusterRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.StartClusterRequest) + StartClusterRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use StartClusterRequest.newBuilder() to construct. + private StartClusterRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StartClusterRequest() { + projectId_ = ""; + region_ = ""; + clusterName_ = ""; + clusterUuid_ = ""; + requestId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StartClusterRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private StartClusterRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + + region_ = s; + break; + } + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + + clusterName_ = s; + break; + } + case 34: + { + java.lang.String s = input.readStringRequireUtf8(); + + clusterUuid_ = s; + break; + } + case 42: + { + java.lang.String s = input.readStringRequireUtf8(); + + requestId_ = 
s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_StartClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_StartClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.StartClusterRequest.class, + com.google.cloud.dataproc.v1.StartClusterRequest.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + * + * + *
+   * Required. The ID of the Google Cloud Platform project the
+   * cluster belongs to.
+   * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The ID of the Google Cloud Platform project the
+   * cluster belongs to.
+   * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REGION_FIELD_NUMBER = 2; + private volatile java.lang.Object region_; + /** + * + * + *
+   * Required. The Dataproc region in which to handle the request.
+   * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The region. + */ + @java.lang.Override + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The Dataproc region in which to handle the request.
+   * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for region. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_NAME_FIELD_NUMBER = 3; + private volatile java.lang.Object clusterName_; + /** + * + * + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The clusterName. + */ + @java.lang.Override + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for clusterName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_UUID_FIELD_NUMBER = 4; + private volatile java.lang.Object clusterUuid_; + /** + * + * + *
+   * Optional. Specifying the `cluster_uuid` means the RPC will fail
+   * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+   * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The clusterUuid. + */ + @java.lang.Override + public java.lang.String getClusterUuid() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterUuid_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. Specifying the `cluster_uuid` means the RPC will fail
+   * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+   * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for clusterUuid. + */ + @java.lang.Override + public com.google.protobuf.ByteString getClusterUuidBytes() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + clusterUuid_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 5; + private volatile java.lang.Object requestId_; + /** + * + * + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two
+   * [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s
+   * with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * Recommendation: Set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two
+   * [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s
+   * with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * Recommendation: Set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (!getRegionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, region_); + } + if (!getClusterNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, clusterName_); + } + if (!getClusterUuidBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, clusterUuid_); + } + if (!getRequestIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, requestId_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (!getRegionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, region_); + } + if (!getClusterNameBytes().isEmpty()) { + size += 
com.google.protobuf.GeneratedMessageV3.computeStringSize(3, clusterName_); + } + if (!getClusterUuidBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, clusterUuid_); + } + if (!getRequestIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, requestId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.StartClusterRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.StartClusterRequest other = + (com.google.cloud.dataproc.v1.StartClusterRequest) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getRegion().equals(other.getRegion())) return false; + if (!getClusterName().equals(other.getClusterName())) return false; + if (!getClusterUuid().equals(other.getClusterUuid())) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + hash = (37 * hash) + CLUSTER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getClusterName().hashCode(); + hash = (37 * hash) + CLUSTER_UUID_FIELD_NUMBER; + hash = (53 * hash) + getClusterUuid().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.cloud.dataproc.v1.StartClusterRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.StartClusterRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.StartClusterRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.StartClusterRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.StartClusterRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.StartClusterRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.StartClusterRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.StartClusterRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.cloud.dataproc.v1.StartClusterRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.StartClusterRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.StartClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.StartClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.StartClusterRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A request to start a cluster.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.StartClusterRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.StartClusterRequest) + com.google.cloud.dataproc.v1.StartClusterRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_StartClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_StartClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.StartClusterRequest.class, + com.google.cloud.dataproc.v1.StartClusterRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.StartClusterRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + region_ = ""; + + clusterName_ = ""; + + clusterUuid_ = ""; + + requestId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_StartClusterRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.StartClusterRequest getDefaultInstanceForType() { + return 
com.google.cloud.dataproc.v1.StartClusterRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.StartClusterRequest build() { + com.google.cloud.dataproc.v1.StartClusterRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.StartClusterRequest buildPartial() { + com.google.cloud.dataproc.v1.StartClusterRequest result = + new com.google.cloud.dataproc.v1.StartClusterRequest(this); + result.projectId_ = projectId_; + result.region_ = region_; + result.clusterName_ = clusterName_; + result.clusterUuid_ = clusterUuid_; + result.requestId_ = requestId_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.StartClusterRequest) { + return mergeFrom((com.google.cloud.dataproc.v1.StartClusterRequest) other); + } else { + 
super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.StartClusterRequest other) { + if (other == com.google.cloud.dataproc.v1.StartClusterRequest.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + onChanged(); + } + if (!other.getClusterName().isEmpty()) { + clusterName_ = other.clusterName_; + onChanged(); + } + if (!other.getClusterUuid().isEmpty()) { + clusterUuid_ = other.clusterUuid_; + onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.StartClusterRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.StartClusterRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + * + * + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + * + * + *
+     * Required. The Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The region. + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for region. + */ + public com.google.protobuf.ByteString getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The region to set. + * @return This builder for chaining. + */ + public Builder setRegion(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for region to set. + * @return This builder for chaining. + */ + public Builder setRegionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterName_ = ""; + /** + * + * + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The clusterName. + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for clusterName. + */ + public com.google.protobuf.ByteString getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The clusterName to set. + * @return This builder for chaining. + */ + public Builder setClusterName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterName_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearClusterName() { + + clusterName_ = getDefaultInstance().getClusterName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for clusterName to set. + * @return This builder for chaining. + */ + public Builder setClusterNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterName_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterUuid_ = ""; + /** + * + * + *
+     * Optional. Specifying the `cluster_uuid` means the RPC will fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The clusterUuid. + */ + public java.lang.String getClusterUuid() { + java.lang.Object ref = clusterUuid_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterUuid_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. Specifying the `cluster_uuid` means the RPC will fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for clusterUuid. + */ + public com.google.protobuf.ByteString getClusterUuidBytes() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + clusterUuid_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. Specifying the `cluster_uuid` means the RPC will fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The clusterUuid to set. + * @return This builder for chaining. + */ + public Builder setClusterUuid(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterUuid_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Specifying the `cluster_uuid` means the RPC will fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearClusterUuid() { + + clusterUuid_ = getDefaultInstance().getClusterUuid(); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Specifying the `cluster_uuid` means the RPC will fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for clusterUuid to set. + * @return This builder for chaining. + */ + public Builder setClusterUuidBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterUuid_ = value; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + /** + * + * + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two
+     * [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s
+     * with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two
+     * [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s
+     * with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two
+     * [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s
+     * with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + requestId_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two
+     * [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s
+     * with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + + requestId_ = getDefaultInstance().getRequestId(); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two
+     * [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s
+     * with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + requestId_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.StartClusterRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.StartClusterRequest) + private static final com.google.cloud.dataproc.v1.StartClusterRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.StartClusterRequest(); + } + + public static com.google.cloud.dataproc.v1.StartClusterRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StartClusterRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StartClusterRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.StartClusterRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StartClusterRequestOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StartClusterRequestOrBuilder.java new file mode 100644 index 00000000..21333938 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StartClusterRequestOrBuilder.java @@ -0,0 +1,172 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +public interface StartClusterRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.StartClusterRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The ID of the Google Cloud Platform project the
+   * cluster belongs to.
+   * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + /** + * + * + *
+   * Required. The ID of the Google Cloud Platform project the
+   * cluster belongs to.
+   * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
+   * Required. The Dataproc region in which to handle the request.
+   * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The region. + */ + java.lang.String getRegion(); + /** + * + * + *
+   * Required. The Dataproc region in which to handle the request.
+   * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for region. + */ + com.google.protobuf.ByteString getRegionBytes(); + + /** + * + * + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The clusterName. + */ + java.lang.String getClusterName(); + /** + * + * + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for clusterName. + */ + com.google.protobuf.ByteString getClusterNameBytes(); + + /** + * + * + *
+   * Optional. Specifying the `cluster_uuid` means the RPC will fail
+   * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+   * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The clusterUuid. + */ + java.lang.String getClusterUuid(); + /** + * + * + *
+   * Optional. Specifying the `cluster_uuid` means the RPC will fail
+   * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+   * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for clusterUuid. + */ + com.google.protobuf.ByteString getClusterUuidBytes(); + + /** + * + * + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two
+   * [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s
+   * with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * Recommendation: Set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The requestId. + */ + java.lang.String getRequestId(); + /** + * + * + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two
+   * [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s
+   * with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * Recommendation: Set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StopClusterRequest.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StopClusterRequest.java new file mode 100644 index 00000000..b4c62c13 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StopClusterRequest.java @@ -0,0 +1,1428 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A request to stop a cluster.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.StopClusterRequest} + */ +public final class StopClusterRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.StopClusterRequest) + StopClusterRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use StopClusterRequest.newBuilder() to construct. + private StopClusterRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StopClusterRequest() { + projectId_ = ""; + region_ = ""; + clusterName_ = ""; + clusterUuid_ = ""; + requestId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StopClusterRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private StopClusterRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + + region_ = s; + break; + } + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + + clusterName_ = s; + break; + } + case 34: + { + java.lang.String s = input.readStringRequireUtf8(); + + clusterUuid_ = s; + break; + } + case 42: + { + java.lang.String s = input.readStringRequireUtf8(); + + requestId_ = s; + 
break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_StopClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_StopClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.StopClusterRequest.class, + com.google.cloud.dataproc.v1.StopClusterRequest.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + * + * + *
+   * Required. The ID of the Google Cloud Platform project the
+   * cluster belongs to.
+   * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The ID of the Google Cloud Platform project the
+   * cluster belongs to.
+   * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REGION_FIELD_NUMBER = 2; + private volatile java.lang.Object region_; + /** + * + * + *
+   * Required. The Dataproc region in which to handle the request.
+   * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The region. + */ + @java.lang.Override + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The Dataproc region in which to handle the request.
+   * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for region. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_NAME_FIELD_NUMBER = 3; + private volatile java.lang.Object clusterName_; + /** + * + * + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The clusterName. + */ + @java.lang.Override + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for clusterName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_UUID_FIELD_NUMBER = 4; + private volatile java.lang.Object clusterUuid_; + /** + * + * + *
+   * Optional. Specifying the `cluster_uuid` means the RPC will fail
+   * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+   * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The clusterUuid. + */ + @java.lang.Override + public java.lang.String getClusterUuid() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterUuid_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. Specifying the `cluster_uuid` means the RPC will fail
+   * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+   * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for clusterUuid. + */ + @java.lang.Override + public com.google.protobuf.ByteString getClusterUuidBytes() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + clusterUuid_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 5; + private volatile java.lang.Object requestId_; + /** + * + * + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two
+   * [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s
+   * with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * Recommendation: Set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two
+   * [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s
+   * with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * Recommendation: Set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (!getRegionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, region_); + } + if (!getClusterNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, clusterName_); + } + if (!getClusterUuidBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, clusterUuid_); + } + if (!getRequestIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, requestId_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (!getRegionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, region_); + } + if (!getClusterNameBytes().isEmpty()) { + size += 
com.google.protobuf.GeneratedMessageV3.computeStringSize(3, clusterName_); + } + if (!getClusterUuidBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, clusterUuid_); + } + if (!getRequestIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, requestId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.StopClusterRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.StopClusterRequest other = + (com.google.cloud.dataproc.v1.StopClusterRequest) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getRegion().equals(other.getRegion())) return false; + if (!getClusterName().equals(other.getClusterName())) return false; + if (!getClusterUuid().equals(other.getClusterUuid())) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + hash = (37 * hash) + CLUSTER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getClusterName().hashCode(); + hash = (37 * hash) + CLUSTER_UUID_FIELD_NUMBER; + hash = (53 * hash) + getClusterUuid().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.cloud.dataproc.v1.StopClusterRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.StopClusterRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.StopClusterRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.StopClusterRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.StopClusterRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.StopClusterRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.StopClusterRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.StopClusterRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.cloud.dataproc.v1.StopClusterRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.StopClusterRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.StopClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.StopClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.StopClusterRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A request to stop a cluster.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.StopClusterRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.StopClusterRequest) + com.google.cloud.dataproc.v1.StopClusterRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_StopClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_StopClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.StopClusterRequest.class, + com.google.cloud.dataproc.v1.StopClusterRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.StopClusterRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + region_ = ""; + + clusterName_ = ""; + + clusterUuid_ = ""; + + requestId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_StopClusterRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.StopClusterRequest getDefaultInstanceForType() { + return 
com.google.cloud.dataproc.v1.StopClusterRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.StopClusterRequest build() { + com.google.cloud.dataproc.v1.StopClusterRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.StopClusterRequest buildPartial() { + com.google.cloud.dataproc.v1.StopClusterRequest result = + new com.google.cloud.dataproc.v1.StopClusterRequest(this); + result.projectId_ = projectId_; + result.region_ = region_; + result.clusterName_ = clusterName_; + result.clusterUuid_ = clusterUuid_; + result.requestId_ = requestId_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.StopClusterRequest) { + return mergeFrom((com.google.cloud.dataproc.v1.StopClusterRequest) other); + } else { + 
super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.StopClusterRequest other) { + if (other == com.google.cloud.dataproc.v1.StopClusterRequest.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + onChanged(); + } + if (!other.getClusterName().isEmpty()) { + clusterName_ = other.clusterName_; + onChanged(); + } + if (!other.getClusterUuid().isEmpty()) { + clusterUuid_ = other.clusterUuid_; + onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.StopClusterRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.StopClusterRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + * + * + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + * + * + *
+     * Required. The Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The region. + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for region. + */ + public com.google.protobuf.ByteString getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The region to set. + * @return This builder for chaining. + */ + public Builder setRegion(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for region to set. + * @return This builder for chaining. + */ + public Builder setRegionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterName_ = ""; + /** + * + * + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The clusterName. + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for clusterName. + */ + public com.google.protobuf.ByteString getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The clusterName to set. + * @return This builder for chaining. + */ + public Builder setClusterName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterName_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearClusterName() { + + clusterName_ = getDefaultInstance().getClusterName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for clusterName to set. + * @return This builder for chaining. + */ + public Builder setClusterNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterName_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterUuid_ = ""; + /** + * + * + *
+     * Optional. Specifying the `cluster_uuid` means the RPC will fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The clusterUuid. + */ + public java.lang.String getClusterUuid() { + java.lang.Object ref = clusterUuid_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterUuid_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. Specifying the `cluster_uuid` means the RPC will fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for clusterUuid. + */ + public com.google.protobuf.ByteString getClusterUuidBytes() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + clusterUuid_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. Specifying the `cluster_uuid` means the RPC will fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The clusterUuid to set. + * @return This builder for chaining. + */ + public Builder setClusterUuid(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterUuid_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Specifying the `cluster_uuid` means the RPC will fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearClusterUuid() { + + clusterUuid_ = getDefaultInstance().getClusterUuid(); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Specifying the `cluster_uuid` means the RPC will fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for clusterUuid to set. + * @return This builder for chaining. + */ + public Builder setClusterUuidBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterUuid_ = value; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + /** + * + * + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two
+     * [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s
+     * with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two
+     * [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s
+     * with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two
+     * [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s
+     * with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + requestId_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two
+     * [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s
+     * with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + + requestId_ = getDefaultInstance().getRequestId(); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two
+     * [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s
+     * with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + requestId_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.StopClusterRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.StopClusterRequest) + private static final com.google.cloud.dataproc.v1.StopClusterRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.StopClusterRequest(); + } + + public static com.google.cloud.dataproc.v1.StopClusterRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StopClusterRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StopClusterRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.StopClusterRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StopClusterRequestOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StopClusterRequestOrBuilder.java new file mode 100644 index 00000000..db7d5089 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StopClusterRequestOrBuilder.java @@ -0,0 +1,172 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +public interface StopClusterRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.StopClusterRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The ID of the Google Cloud Platform project the
+   * cluster belongs to.
+   * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + /** + * + * + *
+   * Required. The ID of the Google Cloud Platform project the
+   * cluster belongs to.
+   * 
+ * + * string project_id = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
+   * Required. The Dataproc region in which to handle the request.
+   * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The region. + */ + java.lang.String getRegion(); + /** + * + * + *
+   * Required. The Dataproc region in which to handle the request.
+   * 
+ * + * string region = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for region. + */ + com.google.protobuf.ByteString getRegionBytes(); + + /** + * + * + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The clusterName. + */ + java.lang.String getClusterName(); + /** + * + * + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for clusterName. + */ + com.google.protobuf.ByteString getClusterNameBytes(); + + /** + * + * + *
+   * Optional. Specifying the `cluster_uuid` means the RPC will fail
+   * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+   * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The clusterUuid. + */ + java.lang.String getClusterUuid(); + /** + * + * + *
+   * Optional. Specifying the `cluster_uuid` means the RPC will fail
+   * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+   * 
+ * + * string cluster_uuid = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for clusterUuid. + */ + com.google.protobuf.ByteString getClusterUuidBytes(); + + /** + * + * + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two
+   * [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s
+   * with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * Recommendation: Set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The requestId. + */ + java.lang.String getRequestId(); + /** + * + * + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two
+   * [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s
+   * with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * Recommendation: Set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequest.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequest.java index f09f3fd4..146b7e6c 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequest.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequest.java @@ -295,8 +295,9 @@ public com.google.cloud.dataproc.v1.JobOrBuilder getJobOrBuilder() { * *
    * Optional. A unique id used to identify the request. If the server
-   * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two
+   * [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
@@ -326,8 +327,9 @@ public java.lang.String getRequestId() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two
+   * [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
@@ -1130,8 +1132,9 @@ public com.google.cloud.dataproc.v1.JobOrBuilder getJobOrBuilder() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
@@ -1160,8 +1163,9 @@ public java.lang.String getRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
@@ -1190,8 +1194,9 @@ public com.google.protobuf.ByteString getRequestIdBytes() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
@@ -1219,8 +1224,9 @@ public Builder setRequestId(java.lang.String value) {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
@@ -1244,8 +1250,9 @@ public Builder clearRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequestOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequestOrBuilder.java
index 89394a14..cc9dcd73 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequestOrBuilder.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequestOrBuilder.java
@@ -115,8 +115,9 @@ public interface SubmitJobRequestOrBuilder
    *
    * <pre>
    * Optional. A unique id used to identify the request. If the server
-   * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two
+   * [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
@@ -135,8 +136,9 @@ public interface SubmitJobRequestOrBuilder
    *
    * <pre>
    * Optional. A unique id used to identify the request. If the server
-   * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two
+   * [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameter.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameter.java
index 31d64261..5841367c 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameter.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameter.java
@@ -228,10 +228,10 @@ public com.google.protobuf.ByteString getNameBytes() {
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -277,10 +277,10 @@ public com.google.protobuf.ProtocolStringList getFieldsList() {
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -326,10 +326,10 @@ public int getFieldsCount() {
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -376,10 +376,10 @@ public java.lang.String getFields(int index) {
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -1081,10 +1081,10 @@ private void ensureFieldsIsMutable() {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1130,10 +1130,10 @@ public com.google.protobuf.ProtocolStringList getFieldsList() {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1179,10 +1179,10 @@ public int getFieldsCount() {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1229,10 +1229,10 @@ public java.lang.String getFields(int index) {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1279,10 +1279,10 @@ public com.google.protobuf.ByteString getFieldsBytes(int index) {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1336,10 +1336,10 @@ public Builder setFields(int index, java.lang.String value) {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1392,10 +1392,10 @@ public Builder addFields(java.lang.String value) {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1445,10 +1445,10 @@ public Builder addAllFields(java.lang.Iterable<java.lang.String> values) {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1497,10 +1497,10 @@ public Builder clearFields() {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameterOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameterOrBuilder.java
index 5625583f..9a57e01c 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameterOrBuilder.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameterOrBuilder.java
@@ -67,10 +67,10 @@ public interface TemplateParameterOrBuilder
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -114,10 +114,10 @@ public interface TemplateParameterOrBuilder
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -161,10 +161,10 @@ public interface TemplateParameterOrBuilder
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -209,10 +209,10 @@ public interface TemplateParameterOrBuilder
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequest.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequest.java
index 20788914..0b73b240 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequest.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequest.java
@@ -651,8 +651,9 @@ public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() {
    *
    * <pre>
    * Optional. A unique id used to identify the request. If the server
-   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two
+   * [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
    * backend is returned.
    * It is recommended to always set this value to a
@@ -682,8 +683,9 @@ public java.lang.String getRequestId() {
    *
    * <pre>
    * Optional. A unique id used to identify the request. If the server
-   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two
+   * [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
    * backend is returned.
    * It is recommended to always set this value to a
@@ -2576,8 +2578,9 @@ public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() {
      *
      * <pre>
      * Optional. A unique id used to identify the request. If the server
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
      * backend is returned.
      * It is recommended to always set this value to a
@@ -2606,8 +2609,9 @@ public java.lang.String getRequestId() {
      *
      * 
      * <pre>
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
      * backend is returned.
      * It is recommended to always set this value to a
@@ -2636,8 +2640,9 @@ public com.google.protobuf.ByteString getRequestIdBytes() {
      *
      * 
      * <pre>
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
      * backend is returned.
      * It is recommended to always set this value to a
@@ -2665,8 +2670,9 @@ public Builder setRequestId(java.lang.String value) {
      *
      * 
      * <pre>
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
      * backend is returned.
      * It is recommended to always set this value to a
@@ -2690,8 +2696,9 @@ public Builder clearRequestId() {
      *
      * 
      * <pre>
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two
+     * [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s
+     * with the same id, then the second request will be ignored and the
      * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
      * backend is returned.
      * It is recommended to always set this value to a
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequestOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequestOrBuilder.java
index cf39dac2..64c9de7b 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequestOrBuilder.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequestOrBuilder.java
@@ -384,8 +384,9 @@ public interface UpdateClusterRequestOrBuilder
    *
    * <pre>
    * Optional. A unique id used to identify the request. If the server
-   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two
+   * [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
    * backend is returned.
    * It is recommended to always set this value to a
@@ -404,8 +405,9 @@ public interface UpdateClusterRequestOrBuilder
    *
    * <pre>
    * Optional. A unique id used to identify the request. If the server
-   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two
+   * [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s
+   * with the same id, then the second request will be ignored and the
    * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
    * backend is returned.
    * It is recommended to always set this value to a
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowMetadata.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowMetadata.java
index 4d8f25d6..8903df0c 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowMetadata.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowMetadata.java
@@ -198,6 +198,51 @@ private WorkflowMetadata(
               java.lang.String s = input.readStringRequireUtf8();
 
               clusterUuid_ = s;
+              break;
+            }
+          case 98:
+            {
+              com.google.protobuf.Duration.Builder subBuilder = null;
+              if (dagTimeout_ != null) {
+                subBuilder = dagTimeout_.toBuilder();
+              }
+              dagTimeout_ =
+                  input.readMessage(com.google.protobuf.Duration.parser(), extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(dagTimeout_);
+                dagTimeout_ = subBuilder.buildPartial();
+              }
+
+              break;
+            }
+          case 106:
+            {
+              com.google.protobuf.Timestamp.Builder subBuilder = null;
+              if (dagStartTime_ != null) {
+                subBuilder = dagStartTime_.toBuilder();
+              }
+              dagStartTime_ =
+                  input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(dagStartTime_);
+                dagStartTime_ = subBuilder.buildPartial();
+              }
+
+              break;
+            }
+          case 114:
+            {
+              com.google.protobuf.Timestamp.Builder subBuilder = null;
+              if (dagEndTime_ != null) {
+                subBuilder = dagEndTime_.toBuilder();
+              }
+              dagEndTime_ =
+                  input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(dagEndTime_);
+                dagEndTime_ = subBuilder.buildPartial();
+              }
+
               break;
             }
           default:
@@ -1005,6 +1050,179 @@ public com.google.protobuf.ByteString getClusterUuidBytes() {
     }
   }
 
+  public static final int DAG_TIMEOUT_FIELD_NUMBER = 12;
+  private com.google.protobuf.Duration dagTimeout_;
+  /**
+   *
+   *
+   * <pre>
+   * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+   * (see [JSON representation of
+   * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * </pre>
+ * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the dagTimeout field is set. + */ + @java.lang.Override + public boolean hasDagTimeout() { + return dagTimeout_ != null; + } + /** + * + * + *
+   * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+   * (see [JSON representation of
+   * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The dagTimeout. + */ + @java.lang.Override + public com.google.protobuf.Duration getDagTimeout() { + return dagTimeout_ == null ? com.google.protobuf.Duration.getDefaultInstance() : dagTimeout_; + } + /** + * + * + *
+   * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+   * (see [JSON representation of
+   * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getDagTimeoutOrBuilder() { + return getDagTimeout(); + } + + public static final int DAG_START_TIME_FIELD_NUMBER = 13; + private com.google.protobuf.Timestamp dagStartTime_; + /** + * + * + *
+   * Output only. DAG start time, only set for workflows with
+   * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+   * DAG begins.
+   * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the dagStartTime field is set. + */ + @java.lang.Override + public boolean hasDagStartTime() { + return dagStartTime_ != null; + } + /** + * + * + *
+   * Output only. DAG start time, only set for workflows with
+   * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+   * DAG begins.
+   * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The dagStartTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getDagStartTime() { + return dagStartTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : dagStartTime_; + } + /** + * + * + *
+   * Output only. DAG start time, only set for workflows with
+   * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+   * DAG begins.
+   * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getDagStartTimeOrBuilder() { + return getDagStartTime(); + } + + public static final int DAG_END_TIME_FIELD_NUMBER = 14; + private com.google.protobuf.Timestamp dagEndTime_; + /** + * + * + *
+   * Output only. DAG end time, only set for workflows with
+   * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+   * DAG ends.
+   * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the dagEndTime field is set. + */ + @java.lang.Override + public boolean hasDagEndTime() { + return dagEndTime_ != null; + } + /** + * + * + *
+   * Output only. DAG end time, only set for workflows with
+   * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+   * DAG ends.
+   * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The dagEndTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getDagEndTime() { + return dagEndTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : dagEndTime_; + } + /** + * + * + *
+   * Output only. DAG end time, only set for workflows with
+   * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+   * DAG ends.
+   * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getDagEndTimeOrBuilder() { + return getDagEndTime(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -1051,6 +1269,15 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (!getClusterUuidBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 11, clusterUuid_); } + if (dagTimeout_ != null) { + output.writeMessage(12, getDagTimeout()); + } + if (dagStartTime_ != null) { + output.writeMessage(13, getDagStartTime()); + } + if (dagEndTime_ != null) { + output.writeMessage(14, getDagEndTime()); + } unknownFields.writeTo(output); } @@ -1100,6 +1327,15 @@ public int getSerializedSize() { if (!getClusterUuidBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(11, clusterUuid_); } + if (dagTimeout_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(12, getDagTimeout()); + } + if (dagStartTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(13, getDagStartTime()); + } + if (dagEndTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(14, getDagEndTime()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -1142,6 +1378,18 @@ public boolean equals(final java.lang.Object obj) { if (!getEndTime().equals(other.getEndTime())) return false; } if (!getClusterUuid().equals(other.getClusterUuid())) return false; + if (hasDagTimeout() != other.hasDagTimeout()) return false; + if (hasDagTimeout()) { + if (!getDagTimeout().equals(other.getDagTimeout())) return false; + } + if (hasDagStartTime() != other.hasDagStartTime()) return false; + if (hasDagStartTime()) { + if (!getDagStartTime().equals(other.getDagStartTime())) return false; + } + if (hasDagEndTime() != 
other.hasDagEndTime()) return false; + if (hasDagEndTime()) { + if (!getDagEndTime().equals(other.getDagEndTime())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -1187,6 +1435,18 @@ public int hashCode() { } hash = (37 * hash) + CLUSTER_UUID_FIELD_NUMBER; hash = (53 * hash) + getClusterUuid().hashCode(); + if (hasDagTimeout()) { + hash = (37 * hash) + DAG_TIMEOUT_FIELD_NUMBER; + hash = (53 * hash) + getDagTimeout().hashCode(); + } + if (hasDagStartTime()) { + hash = (37 * hash) + DAG_START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getDagStartTime().hashCode(); + } + if (hasDagEndTime()) { + hash = (37 * hash) + DAG_END_TIME_FIELD_NUMBER; + hash = (53 * hash) + getDagEndTime().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -1393,6 +1653,24 @@ public Builder clear() { } clusterUuid_ = ""; + if (dagTimeoutBuilder_ == null) { + dagTimeout_ = null; + } else { + dagTimeout_ = null; + dagTimeoutBuilder_ = null; + } + if (dagStartTimeBuilder_ == null) { + dagStartTime_ = null; + } else { + dagStartTime_ = null; + dagStartTimeBuilder_ = null; + } + if (dagEndTimeBuilder_ == null) { + dagEndTime_ = null; + } else { + dagEndTime_ = null; + dagEndTimeBuilder_ = null; + } return this; } @@ -1453,6 +1731,21 @@ public com.google.cloud.dataproc.v1.WorkflowMetadata buildPartial() { result.endTime_ = endTimeBuilder_.build(); } result.clusterUuid_ = clusterUuid_; + if (dagTimeoutBuilder_ == null) { + result.dagTimeout_ = dagTimeout_; + } else { + result.dagTimeout_ = dagTimeoutBuilder_.build(); + } + if (dagStartTimeBuilder_ == null) { + result.dagStartTime_ = dagStartTime_; + } else { + result.dagStartTime_ = dagStartTimeBuilder_.build(); + } + if (dagEndTimeBuilder_ == null) { + result.dagEndTime_ = dagEndTime_; + } else { + result.dagEndTime_ = dagEndTimeBuilder_.build(); + } onBuilt(); return result; } @@ -1536,6 +1829,15 @@ public Builder 
mergeFrom(com.google.cloud.dataproc.v1.WorkflowMetadata other) { clusterUuid_ = other.clusterUuid_; onChanged(); } + if (other.hasDagTimeout()) { + mergeDagTimeout(other.getDagTimeout()); + } + if (other.hasDagStartTime()) { + mergeDagStartTime(other.getDagStartTime()); + } + if (other.hasDagEndTime()) { + mergeDagEndTime(other.getDagEndTime()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -3225,6 +3527,665 @@ public Builder setClusterUuidBytes(com.google.protobuf.ByteString value) { return this; } + private com.google.protobuf.Duration dagTimeout_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + dagTimeoutBuilder_; + /** + * + * + *
+     * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+     * (see [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the dagTimeout field is set. + */ + public boolean hasDagTimeout() { + return dagTimeoutBuilder_ != null || dagTimeout_ != null; + } + /** + * + * + *
+     * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+     * (see [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The dagTimeout. + */ + public com.google.protobuf.Duration getDagTimeout() { + if (dagTimeoutBuilder_ == null) { + return dagTimeout_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : dagTimeout_; + } else { + return dagTimeoutBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+     * (see [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setDagTimeout(com.google.protobuf.Duration value) { + if (dagTimeoutBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + dagTimeout_ = value; + onChanged(); + } else { + dagTimeoutBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+     * (see [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setDagTimeout(com.google.protobuf.Duration.Builder builderForValue) { + if (dagTimeoutBuilder_ == null) { + dagTimeout_ = builderForValue.build(); + onChanged(); + } else { + dagTimeoutBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+     * (see [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeDagTimeout(com.google.protobuf.Duration value) { + if (dagTimeoutBuilder_ == null) { + if (dagTimeout_ != null) { + dagTimeout_ = + com.google.protobuf.Duration.newBuilder(dagTimeout_).mergeFrom(value).buildPartial(); + } else { + dagTimeout_ = value; + } + onChanged(); + } else { + dagTimeoutBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+     * (see [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearDagTimeout() { + if (dagTimeoutBuilder_ == null) { + dagTimeout_ = null; + onChanged(); + } else { + dagTimeout_ = null; + dagTimeoutBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+     * (see [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Duration.Builder getDagTimeoutBuilder() { + + onChanged(); + return getDagTimeoutFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+     * (see [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.DurationOrBuilder getDagTimeoutOrBuilder() { + if (dagTimeoutBuilder_ != null) { + return dagTimeoutBuilder_.getMessageOrBuilder(); + } else { + return dagTimeout_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : dagTimeout_; + } + } + /** + * + * + *
+     * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+     * (see [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + getDagTimeoutFieldBuilder() { + if (dagTimeoutBuilder_ == null) { + dagTimeoutBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + getDagTimeout(), getParentForChildren(), isClean()); + dagTimeout_ = null; + } + return dagTimeoutBuilder_; + } + + private com.google.protobuf.Timestamp dagStartTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + dagStartTimeBuilder_; + /** + * + * + *
+     * Output only. DAG start time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG begins.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the dagStartTime field is set. + */ + public boolean hasDagStartTime() { + return dagStartTimeBuilder_ != null || dagStartTime_ != null; + } + /** + * + * + *
+     * Output only. DAG start time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG begins.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The dagStartTime. + */ + public com.google.protobuf.Timestamp getDagStartTime() { + if (dagStartTimeBuilder_ == null) { + return dagStartTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : dagStartTime_; + } else { + return dagStartTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Output only. DAG start time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG begins.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setDagStartTime(com.google.protobuf.Timestamp value) { + if (dagStartTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + dagStartTime_ = value; + onChanged(); + } else { + dagStartTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Output only. DAG start time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG begins.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setDagStartTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (dagStartTimeBuilder_ == null) { + dagStartTime_ = builderForValue.build(); + onChanged(); + } else { + dagStartTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Output only. DAG start time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG begins.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeDagStartTime(com.google.protobuf.Timestamp value) { + if (dagStartTimeBuilder_ == null) { + if (dagStartTime_ != null) { + dagStartTime_ = + com.google.protobuf.Timestamp.newBuilder(dagStartTime_) + .mergeFrom(value) + .buildPartial(); + } else { + dagStartTime_ = value; + } + onChanged(); + } else { + dagStartTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Output only. DAG start time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG begins.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearDagStartTime() { + if (dagStartTimeBuilder_ == null) { + dagStartTime_ = null; + onChanged(); + } else { + dagStartTime_ = null; + dagStartTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Output only. DAG start time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG begins.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getDagStartTimeBuilder() { + + onChanged(); + return getDagStartTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. DAG start time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG begins.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getDagStartTimeOrBuilder() { + if (dagStartTimeBuilder_ != null) { + return dagStartTimeBuilder_.getMessageOrBuilder(); + } else { + return dagStartTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : dagStartTime_; + } + } + /** + * + * + *
+     * Output only. DAG start time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG begins.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getDagStartTimeFieldBuilder() { + if (dagStartTimeBuilder_ == null) { + dagStartTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getDagStartTime(), getParentForChildren(), isClean()); + dagStartTime_ = null; + } + return dagStartTimeBuilder_; + } + + private com.google.protobuf.Timestamp dagEndTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + dagEndTimeBuilder_; + /** + * + * + *
+     * Output only. DAG end time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG ends.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the dagEndTime field is set. + */ + public boolean hasDagEndTime() { + return dagEndTimeBuilder_ != null || dagEndTime_ != null; + } + /** + * + * + *
+     * Output only. DAG end time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG ends.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The dagEndTime. + */ + public com.google.protobuf.Timestamp getDagEndTime() { + if (dagEndTimeBuilder_ == null) { + return dagEndTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : dagEndTime_; + } else { + return dagEndTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Output only. DAG end time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG ends.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setDagEndTime(com.google.protobuf.Timestamp value) { + if (dagEndTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + dagEndTime_ = value; + onChanged(); + } else { + dagEndTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Output only. DAG end time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG ends.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setDagEndTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (dagEndTimeBuilder_ == null) { + dagEndTime_ = builderForValue.build(); + onChanged(); + } else { + dagEndTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Output only. DAG end time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG ends.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeDagEndTime(com.google.protobuf.Timestamp value) { + if (dagEndTimeBuilder_ == null) { + if (dagEndTime_ != null) { + dagEndTime_ = + com.google.protobuf.Timestamp.newBuilder(dagEndTime_).mergeFrom(value).buildPartial(); + } else { + dagEndTime_ = value; + } + onChanged(); + } else { + dagEndTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Output only. DAG end time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG ends.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearDagEndTime() { + if (dagEndTimeBuilder_ == null) { + dagEndTime_ = null; + onChanged(); + } else { + dagEndTime_ = null; + dagEndTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Output only. DAG end time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG ends.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getDagEndTimeBuilder() { + + onChanged(); + return getDagEndTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. DAG end time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG ends.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getDagEndTimeOrBuilder() { + if (dagEndTimeBuilder_ != null) { + return dagEndTimeBuilder_.getMessageOrBuilder(); + } else { + return dagEndTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : dagEndTime_; + } + } + /** + * + * + *
+     * Output only. DAG end time, only set for workflows with
+     * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+     * DAG ends.
+     * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getDagEndTimeFieldBuilder() { + if (dagEndTimeBuilder_ == null) { + dagEndTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getDagEndTime(), getParentForChildren(), isClean()); + dagEndTime_ = null; + } + return dagEndTimeBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowMetadataOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowMetadataOrBuilder.java index 61c07982..30219062 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowMetadataOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowMetadataOrBuilder.java @@ -407,4 +407,142 @@ public interface WorkflowMetadataOrBuilder * @return The bytes for clusterUuid. */ com.google.protobuf.ByteString getClusterUuidBytes(); + + /** + * + * + *
+   * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+   * (see [JSON representation of
+   * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the dagTimeout field is set. + */ + boolean hasDagTimeout(); + /** + * + * + *
+   * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+   * (see [JSON representation of
+   * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The dagTimeout. + */ + com.google.protobuf.Duration getDagTimeout(); + /** + * + * + *
+   * Output only. The timeout duration for the DAG of jobs, expressed in seconds
+   * (see [JSON representation of
+   * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration dag_timeout = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.DurationOrBuilder getDagTimeoutOrBuilder(); + + /** + * + * + *
+   * Output only. DAG start time, only set for workflows with
+   * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+   * DAG begins.
+   * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the dagStartTime field is set. + */ + boolean hasDagStartTime(); + /** + * + * + *
+   * Output only. DAG start time, only set for workflows with
+   * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+   * DAG begins.
+   * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The dagStartTime. + */ + com.google.protobuf.Timestamp getDagStartTime(); + /** + * + * + *
+   * Output only. DAG start time, only set for workflows with
+   * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+   * DAG begins.
+   * 
+ * + * + * .google.protobuf.Timestamp dag_start_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getDagStartTimeOrBuilder(); + + /** + * + * + *
+   * Output only. DAG end time, only set for workflows with
+   * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+   * DAG ends.
+   * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the dagEndTime field is set. + */ + boolean hasDagEndTime(); + /** + * + * + *
+   * Output only. DAG end time, only set for workflows with
+   * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+   * DAG ends.
+   * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The dagEndTime. + */ + com.google.protobuf.Timestamp getDagEndTime(); + /** + * + * + *
+   * Output only. DAG end time, only set for workflows with
+   * [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
+   * DAG ends.
+   * 
+ * + * + * .google.protobuf.Timestamp dag_end_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getDagEndTimeOrBuilder(); } diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplate.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplate.java index f6fa56c0..3c2e2bf8 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplate.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplate.java @@ -176,6 +176,21 @@ private WorkflowTemplate( com.google.cloud.dataproc.v1.TemplateParameter.parser(), extensionRegistry)); break; } + case 82: + { + com.google.protobuf.Duration.Builder subBuilder = null; + if (dagTimeout_ != null) { + subBuilder = dagTimeout_.toBuilder(); + } + dagTimeout_ = + input.readMessage(com.google.protobuf.Duration.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(dagTimeout_); + dagTimeout_ = subBuilder.buildPartial(); + } + + break; + } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { @@ -802,6 +817,82 @@ public com.google.cloud.dataproc.v1.TemplateParameterOrBuilder getParametersOrBu return parameters_.get(index); } + public static final int DAG_TIMEOUT_FIELD_NUMBER = 10; + private com.google.protobuf.Duration dagTimeout_; + /** + * + * + *
+   * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+   * [JSON representation of
+   * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * The timeout duration must be from 10 minutes ("600s") to 24 hours
+   * ("86400s"). The timer begins when the first job is submitted. If the
+   * workflow is running at the end of the timeout period, any remaining jobs
+   * are cancelled, the workflow is ended, and if the workflow was running on a
+   * [managed
+   * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+   * the cluster is deleted.
+   * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the dagTimeout field is set. + */ + @java.lang.Override + public boolean hasDagTimeout() { + return dagTimeout_ != null; + } + /** + * + * + *
+   * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+   * [JSON representation of
+   * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * The timeout duration must be from 10 minutes ("600s") to 24 hours
+   * ("86400s"). The timer begins when the first job is submitted. If the
+   * workflow is running at the end of the timeout period, any remaining jobs
+   * are cancelled, the workflow is ended, and if the workflow was running on a
+   * [managed
+   * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+   * the cluster is deleted.
+   * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The dagTimeout. + */ + @java.lang.Override + public com.google.protobuf.Duration getDagTimeout() { + return dagTimeout_ == null ? com.google.protobuf.Duration.getDefaultInstance() : dagTimeout_; + } + /** + * + * + *
+   * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+   * [JSON representation of
+   * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * The timeout duration must be from 10 minutes ("600s") to 24 hours
+   * ("86400s"). The timer begins when the first job is submitted. If the
+   * workflow is running at the end of the timeout period, any remaining jobs
+   * are cancelled, the workflow is ended, and if the workflow was running on a
+   * [managed
+   * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+   * the cluster is deleted.
+   * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getDagTimeoutOrBuilder() { + return getDagTimeout(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -842,6 +933,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io for (int i = 0; i < parameters_.size(); i++) { output.writeMessage(9, parameters_.get(i)); } + if (dagTimeout_ != null) { + output.writeMessage(10, getDagTimeout()); + } unknownFields.writeTo(output); } @@ -885,6 +979,9 @@ public int getSerializedSize() { for (int i = 0; i < parameters_.size(); i++) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, parameters_.get(i)); } + if (dagTimeout_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(10, getDagTimeout()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -919,6 +1016,10 @@ public boolean equals(final java.lang.Object obj) { } if (!getJobsList().equals(other.getJobsList())) return false; if (!getParametersList().equals(other.getParametersList())) return false; + if (hasDagTimeout() != other.hasDagTimeout()) return false; + if (hasDagTimeout()) { + if (!getDagTimeout().equals(other.getDagTimeout())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -960,6 +1061,10 @@ public int hashCode() { hash = (37 * hash) + PARAMETERS_FIELD_NUMBER; hash = (53 * hash) + getParametersList().hashCode(); } + if (hasDagTimeout()) { + hash = (37 * hash) + DAG_TIMEOUT_FIELD_NUMBER; + hash = (53 * hash) + getDagTimeout().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -1165,6 +1270,12 @@ public Builder clear() { } else { parametersBuilder_.clear(); } + if (dagTimeoutBuilder_ == null) { + dagTimeout_ = null; + } else { + dagTimeout_ = null; + dagTimeoutBuilder_ = 
null; + } return this; } @@ -1231,6 +1342,11 @@ public com.google.cloud.dataproc.v1.WorkflowTemplate buildPartial() { } else { result.parameters_ = parametersBuilder_.build(); } + if (dagTimeoutBuilder_ == null) { + result.dagTimeout_ = dagTimeout_; + } else { + result.dagTimeout_ = dagTimeoutBuilder_.build(); + } onBuilt(); return result; } @@ -1355,6 +1471,9 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.WorkflowTemplate other) { } } } + if (other.hasDagTimeout()) { + mergeDagTimeout(other.getDagTimeout()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -3303,6 +3422,279 @@ public com.google.cloud.dataproc.v1.TemplateParameter.Builder addParametersBuild return parametersBuilder_; } + private com.google.protobuf.Duration dagTimeout_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + dagTimeoutBuilder_; + /** + * + * + *
+     * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+     * [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * The timeout duration must be from 10 minutes ("600s") to 24 hours
+     * ("86400s"). The timer begins when the first job is submitted. If the
+     * workflow is running at the end of the timeout period, any remaining jobs
+     * are cancelled, the workflow is ended, and if the workflow was running on a
+     * [managed
+     * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+     * the cluster is deleted.
+     * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the dagTimeout field is set. + */ + public boolean hasDagTimeout() { + return dagTimeoutBuilder_ != null || dagTimeout_ != null; + } + /** + * + * + *
+     * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+     * [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * The timeout duration must be from 10 minutes ("600s") to 24 hours
+     * ("86400s"). The timer begins when the first job is submitted. If the
+     * workflow is running at the end of the timeout period, any remaining jobs
+     * are cancelled, the workflow is ended, and if the workflow was running on a
+     * [managed
+     * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+     * the cluster is deleted.
+     * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The dagTimeout. + */ + public com.google.protobuf.Duration getDagTimeout() { + if (dagTimeoutBuilder_ == null) { + return dagTimeout_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : dagTimeout_; + } else { + return dagTimeoutBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+     * [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * The timeout duration must be from 10 minutes ("600s") to 24 hours
+     * ("86400s"). The timer begins when the first job is submitted. If the
+     * workflow is running at the end of the timeout period, any remaining jobs
+     * are cancelled, the workflow is ended, and if the workflow was running on a
+     * [managed
+     * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+     * the cluster is deleted.
+     * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setDagTimeout(com.google.protobuf.Duration value) { + if (dagTimeoutBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + dagTimeout_ = value; + onChanged(); + } else { + dagTimeoutBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+     * [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * The timeout duration must be from 10 minutes ("600s") to 24 hours
+     * ("86400s"). The timer begins when the first job is submitted. If the
+     * workflow is running at the end of the timeout period, any remaining jobs
+     * are cancelled, the workflow is ended, and if the workflow was running on a
+     * [managed
+     * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+     * the cluster is deleted.
+     * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setDagTimeout(com.google.protobuf.Duration.Builder builderForValue) { + if (dagTimeoutBuilder_ == null) { + dagTimeout_ = builderForValue.build(); + onChanged(); + } else { + dagTimeoutBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+     * [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * The timeout duration must be from 10 minutes ("600s") to 24 hours
+     * ("86400s"). The timer begins when the first job is submitted. If the
+     * workflow is running at the end of the timeout period, any remaining jobs
+     * are cancelled, the workflow is ended, and if the workflow was running on a
+     * [managed
+     * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+     * the cluster is deleted.
+     * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeDagTimeout(com.google.protobuf.Duration value) { + if (dagTimeoutBuilder_ == null) { + if (dagTimeout_ != null) { + dagTimeout_ = + com.google.protobuf.Duration.newBuilder(dagTimeout_).mergeFrom(value).buildPartial(); + } else { + dagTimeout_ = value; + } + onChanged(); + } else { + dagTimeoutBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+     * [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * The timeout duration must be from 10 minutes ("600s") to 24 hours
+     * ("86400s"). The timer begins when the first job is submitted. If the
+     * workflow is running at the end of the timeout period, any remaining jobs
+     * are cancelled, the workflow is ended, and if the workflow was running on a
+     * [managed
+     * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+     * the cluster is deleted.
+     * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearDagTimeout() { + if (dagTimeoutBuilder_ == null) { + dagTimeout_ = null; + onChanged(); + } else { + dagTimeout_ = null; + dagTimeoutBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+     * [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * The timeout duration must be from 10 minutes ("600s") to 24 hours
+     * ("86400s"). The timer begins when the first job is submitted. If the
+     * workflow is running at the end of the timeout period, any remaining jobs
+     * are cancelled, the workflow is ended, and if the workflow was running on a
+     * [managed
+     * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+     * the cluster is deleted.
+     * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Duration.Builder getDagTimeoutBuilder() { + + onChanged(); + return getDagTimeoutFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+     * [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * The timeout duration must be from 10 minutes ("600s") to 24 hours
+     * ("86400s"). The timer begins when the first job is submitted. If the
+     * workflow is running at the end of the timeout period, any remaining jobs
+     * are cancelled, the workflow is ended, and if the workflow was running on a
+     * [managed
+     * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+     * the cluster is deleted.
+     * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.DurationOrBuilder getDagTimeoutOrBuilder() { + if (dagTimeoutBuilder_ != null) { + return dagTimeoutBuilder_.getMessageOrBuilder(); + } else { + return dagTimeout_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : dagTimeout_; + } + } + /** + * + * + *
+     * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+     * [JSON representation of
+     * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * The timeout duration must be from 10 minutes ("600s") to 24 hours
+     * ("86400s"). The timer begins when the first job is submitted. If the
+     * workflow is running at the end of the timeout period, any remaining jobs
+     * are cancelled, the workflow is ended, and if the workflow was running on a
+     * [managed
+     * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+     * the cluster is deleted.
+     * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + getDagTimeoutFieldBuilder() { + if (dagTimeoutBuilder_ == null) { + dagTimeoutBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + getDagTimeout(), getParentForChildren(), isClean()); + dagTimeout_ = null; + } + return dagTimeoutBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateOrBuilder.java index 524587ca..5ad42f18 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateOrBuilder.java @@ -438,4 +438,69 @@ public interface WorkflowTemplateOrBuilder * */ com.google.cloud.dataproc.v1.TemplateParameterOrBuilder getParametersOrBuilder(int index); + + /** + * + * + *
+   * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+   * [JSON representation of
+   * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * The timeout duration must be from 10 minutes ("600s") to 24 hours
+   * ("86400s"). The timer begins when the first job is submitted. If the
+   * workflow is running at the end of the timeout period, any remaining jobs
+   * are cancelled, the workflow is ended, and if the workflow was running on a
+   * [managed
+   * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+   * the cluster is deleted.
+   * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the dagTimeout field is set. + */ + boolean hasDagTimeout(); + /** + * + * + *
+   * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+   * [JSON representation of
+   * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * The timeout duration must be from 10 minutes ("600s") to 24 hours
+   * ("86400s"). The timer begins when the first job is submitted. If the
+   * workflow is running at the end of the timeout period, any remaining jobs
+   * are cancelled, the workflow is ended, and if the workflow was running on a
+   * [managed
+   * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+   * the cluster is deleted.
+   * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The dagTimeout. + */ + com.google.protobuf.Duration getDagTimeout(); + /** + * + * + *
+   * Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
+   * [JSON representation of
+   * duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * The timeout duration must be from 10 minutes ("600s") to 24 hours
+   * ("86400s"). The timer begins when the first job is submitted. If the
+   * workflow is running at the end of the timeout period, any remaining jobs
+   * are cancelled, the workflow is ended, and if the workflow was running on a
+   * [managed
+   * cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
+   * the cluster is deleted.
+   * 
+ * + * .google.protobuf.Duration dag_timeout = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.DurationOrBuilder getDagTimeoutOrBuilder(); } diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplatesProto.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplatesProto.java index c44ab556..b5036aaf 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplatesProto.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplatesProto.java @@ -151,189 +151,195 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "proto\032\031google/api/resource.proto\032\'google" + "/cloud/dataproc/v1/clusters.proto\032#googl" + "e/cloud/dataproc/v1/jobs.proto\032#google/l" - + "ongrunning/operations.proto\032\033google/prot" - + "obuf/empty.proto\032\037google/protobuf/timest" - + "amp.proto\"\315\005\n\020WorkflowTemplate\022\017\n\002id\030\002 \001" - + "(\tB\003\340A\002\022\021\n\004name\030\001 \001(\tB\003\340A\003\022\024\n\007version\030\003 " - + "\001(\005B\003\340A\001\0224\n\013create_time\030\004 \001(\0132\032.google.p" - + "rotobuf.TimestampB\003\340A\003\0224\n\013update_time\030\005 " - + "\001(\0132\032.google.protobuf.TimestampB\003\340A\003\022K\n\006" - + "labels\030\006 \003(\01326.google.cloud.dataproc.v1." - + "WorkflowTemplate.LabelsEntryB\003\340A\001\022K\n\tpla" - + "cement\030\007 \001(\01323.google.cloud.dataproc.v1." 
- + "WorkflowTemplatePlacementB\003\340A\002\0227\n\004jobs\030\010" - + " \003(\0132$.google.cloud.dataproc.v1.OrderedJ" - + "obB\003\340A\002\022D\n\nparameters\030\t \003(\0132+.google.clo" - + "ud.dataproc.v1.TemplateParameterB\003\340A\001\032-\n" - + "\013LabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t" - + ":\0028\001:\312\001\352A\306\001\n(dataproc.googleapis.com/Wor" - + "kflowTemplate\022Iprojects/{project}/region" - + "s/{region}/workflowTemplates/{workflow_t" - + "emplate}\022Mprojects/{project}/locations/{" - + "location}/workflowTemplates/{workflow_te" - + "mplate} \001\"\264\001\n\031WorkflowTemplatePlacement\022" - + "C\n\017managed_cluster\030\001 \001(\0132(.google.cloud." - + "dataproc.v1.ManagedClusterH\000\022E\n\020cluster_" - + "selector\030\002 \001(\0132).google.cloud.dataproc.v" - + "1.ClusterSelectorH\000B\013\n\tplacement\"\343\001\n\016Man" - + "agedCluster\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\022<" - + "\n\006config\030\003 \001(\0132\'.google.cloud.dataproc.v" - + "1.ClusterConfigB\003\340A\002\022I\n\006labels\030\004 \003(\01324.g" - + "oogle.cloud.dataproc.v1.ManagedCluster.L" - + "abelsEntryB\003\340A\001\032-\n\013LabelsEntry\022\013\n\003key\030\001 " - + "\001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\265\001\n\017ClusterSelect" - + "or\022\021\n\004zone\030\001 \001(\tB\003\340A\001\022Y\n\016cluster_labels\030" - + "\002 \003(\0132<.google.cloud.dataproc.v1.Cluster" - + "Selector.ClusterLabelsEntryB\003\340A\002\0324\n\022Clus" - + "terLabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001" - + "(\t:\0028\001\"\206\006\n\nOrderedJob\022\024\n\007step_id\030\001 \001(\tB\003" - + "\340A\002\022>\n\nhadoop_job\030\002 \001(\0132#.google.cloud.d" - + "ataproc.v1.HadoopJobB\003\340A\001H\000\022<\n\tspark_job" - + "\030\003 \001(\0132\".google.cloud.dataproc.v1.SparkJ" - + "obB\003\340A\001H\000\022@\n\013pyspark_job\030\004 \001(\0132$.google." 
- + "cloud.dataproc.v1.PySparkJobB\003\340A\001H\000\022:\n\010h" - + "ive_job\030\005 \001(\0132!.google.cloud.dataproc.v1" - + ".HiveJobB\003\340A\001H\000\0228\n\007pig_job\030\006 \001(\0132 .googl" - + "e.cloud.dataproc.v1.PigJobB\003\340A\001H\000\022?\n\013spa" - + "rk_r_job\030\013 \001(\0132#.google.cloud.dataproc.v" - + "1.SparkRJobB\003\340A\001H\000\022C\n\rspark_sql_job\030\007 \001(" - + "\0132%.google.cloud.dataproc.v1.SparkSqlJob" - + "B\003\340A\001H\000\022>\n\npresto_job\030\014 \001(\0132#.google.clo" - + "ud.dataproc.v1.PrestoJobB\003\340A\001H\000\022E\n\006label" - + "s\030\010 \003(\01320.google.cloud.dataproc.v1.Order" - + "edJob.LabelsEntryB\003\340A\001\022@\n\nscheduling\030\t \001" - + "(\0132\'.google.cloud.dataproc.v1.JobSchedul" - + "ingB\003\340A\001\022\"\n\025prerequisite_step_ids\030\n \003(\tB" - + "\003\340A\001\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005valu" - + "e\030\002 \001(\t:\0028\001B\n\n\010job_type\"\235\001\n\021TemplatePara" - + "meter\022\021\n\004name\030\001 \001(\tB\003\340A\002\022\023\n\006fields\030\002 \003(\t" - + "B\003\340A\002\022\030\n\013description\030\003 \001(\tB\003\340A\001\022F\n\nvalid" - + "ation\030\004 \001(\0132-.google.cloud.dataproc.v1.P" - + "arameterValidationB\003\340A\001\"\241\001\n\023ParameterVal" - + "idation\022:\n\005regex\030\001 \001(\0132).google.cloud.da" - + "taproc.v1.RegexValidationH\000\022;\n\006values\030\002 " - + "\001(\0132).google.cloud.dataproc.v1.ValueVali" - + "dationH\000B\021\n\017validation_type\"\'\n\017RegexVali" - + "dation\022\024\n\007regexes\030\001 \003(\tB\003\340A\002\"&\n\017ValueVal" - + "idation\022\023\n\006values\030\001 \003(\tB\003\340A\002\"\257\005\n\020Workflo" - + "wMetadata\022\025\n\010template\030\001 \001(\tB\003\340A\003\022\024\n\007vers" - + "ion\030\002 \001(\005B\003\340A\003\022G\n\016create_cluster\030\003 \001(\0132*" - + ".google.cloud.dataproc.v1.ClusterOperati" - + 
"onB\003\340A\003\022;\n\005graph\030\004 \001(\0132\'.google.cloud.da" - + "taproc.v1.WorkflowGraphB\003\340A\003\022G\n\016delete_c" - + "luster\030\005 \001(\0132*.google.cloud.dataproc.v1." - + "ClusterOperationB\003\340A\003\022D\n\005state\030\006 \001(\01620.g" - + "oogle.cloud.dataproc.v1.WorkflowMetadata" - + ".StateB\003\340A\003\022\031\n\014cluster_name\030\007 \001(\tB\003\340A\003\022N" - + "\n\nparameters\030\010 \003(\0132:.google.cloud.datapr" - + "oc.v1.WorkflowMetadata.ParametersEntry\0223" - + "\n\nstart_time\030\t \001(\0132\032.google.protobuf.Tim" - + "estampB\003\340A\003\0221\n\010end_time\030\n \001(\0132\032.google.p" - + "rotobuf.TimestampB\003\340A\003\022\031\n\014cluster_uuid\030\013" - + " \001(\tB\003\340A\003\0321\n\017ParametersEntry\022\013\n\003key\030\001 \001(" - + "\t\022\r\n\005value\030\002 \001(\t:\0028\001\"8\n\005State\022\013\n\007UNKNOWN" - + "\020\000\022\013\n\007PENDING\020\001\022\013\n\007RUNNING\020\002\022\010\n\004DONE\020\003\"T" - + "\n\020ClusterOperation\022\031\n\014operation_id\030\001 \001(\t" - + "B\003\340A\003\022\022\n\005error\030\002 \001(\tB\003\340A\003\022\021\n\004done\030\003 \001(\010B" - + "\003\340A\003\"K\n\rWorkflowGraph\022:\n\005nodes\030\001 \003(\0132&.g" - + "oogle.cloud.dataproc.v1.WorkflowNodeB\003\340A" - + "\003\"\243\002\n\014WorkflowNode\022\024\n\007step_id\030\001 \001(\tB\003\340A\003" - + "\022\"\n\025prerequisite_step_ids\030\002 \003(\tB\003\340A\003\022\023\n\006" - + "job_id\030\003 \001(\tB\003\340A\003\022D\n\005state\030\005 \001(\01620.googl" - + "e.cloud.dataproc.v1.WorkflowNode.NodeSta" - + "teB\003\340A\003\022\022\n\005error\030\006 \001(\tB\003\340A\003\"j\n\tNodeState" - + "\022\032\n\026NODE_STATE_UNSPECIFIED\020\000\022\013\n\007BLOCKED\020" - + "\001\022\014\n\010RUNNABLE\020\002\022\013\n\007RUNNING\020\003\022\r\n\tCOMPLETE" - + "D\020\004\022\n\n\006FAILED\020\005\"\244\001\n\035CreateWorkflowTempla" - + "teRequest\022@\n\006parent\030\001 
\001(\tB0\340A\002\372A*\022(datap" - + "roc.googleapis.com/WorkflowTemplate\022A\n\010t" - + "emplate\030\002 \001(\0132*.google.cloud.dataproc.v1" - + ".WorkflowTemplateB\003\340A\002\"r\n\032GetWorkflowTem" - + "plateRequest\022>\n\004name\030\001 \001(\tB0\340A\002\372A*\n(data" - + "proc.googleapis.com/WorkflowTemplate\022\024\n\007" - + "version\030\002 \001(\005B\003\340A\001\"\255\002\n\"InstantiateWorkfl" - + "owTemplateRequest\022>\n\004name\030\001 \001(\tB0\340A\002\372A*\n" - + "(dataproc.googleapis.com/WorkflowTemplat" - + "e\022\024\n\007version\030\002 \001(\005B\003\340A\001\022\027\n\nrequest_id\030\005 " - + "\001(\tB\003\340A\001\022e\n\nparameters\030\006 \003(\0132L.google.cl" - + "oud.dataproc.v1.InstantiateWorkflowTempl" - + "ateRequest.ParametersEntryB\003\340A\001\0321\n\017Param" - + "etersEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\002" - + "8\001\"\310\001\n(InstantiateInlineWorkflowTemplate" - + "Request\022@\n\006parent\030\001 \001(\tB0\340A\002\372A*\022(datapro" - + "c.googleapis.com/WorkflowTemplate\022A\n\010tem" - + "plate\030\002 \001(\0132*.google.cloud.dataproc.v1.W" - + "orkflowTemplateB\003\340A\002\022\027\n\nrequest_id\030\003 \001(\t" - + "B\003\340A\001\"b\n\035UpdateWorkflowTemplateRequest\022A" - + "\n\010template\030\001 \001(\0132*.google.cloud.dataproc" - + ".v1.WorkflowTemplateB\003\340A\002\"\221\001\n\034ListWorkfl" - + "owTemplatesRequest\022@\n\006parent\030\001 \001(\tB0\340A\002\372" - + "A*\022(dataproc.googleapis.com/WorkflowTemp" - + "late\022\026\n\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\npage_tok" - + "en\030\003 \001(\tB\003\340A\001\"\201\001\n\035ListWorkflowTemplatesR" - + "esponse\022B\n\ttemplates\030\001 \003(\0132*.google.clou" - + "d.dataproc.v1.WorkflowTemplateB\003\340A\003\022\034\n\017n" - + "ext_page_token\030\002 \001(\tB\003\340A\003\"u\n\035DeleteWorkf" - + "lowTemplateRequest\022>\n\004name\030\001 \001(\tB0\340A\002\372A*" - + 
"\n(dataproc.googleapis.com/WorkflowTempla" - + "te\022\024\n\007version\030\002 \001(\005B\003\340A\0012\346\020\n\027WorkflowTem" - + "plateService\022\233\002\n\026CreateWorkflowTemplate\022" - + "7.google.cloud.dataproc.v1.CreateWorkflo" - + "wTemplateRequest\032*.google.cloud.dataproc" - + ".v1.WorkflowTemplate\"\233\001\202\323\344\223\002\202\001\"5/v1/{par" - + "ent=projects/*/locations/*}/workflowTemp" - + "lates:\010templateZ?\"3/v1/{parent=projects/" - + "*/regions/*}/workflowTemplates:\010template" - + "\332A\017parent,template\022\364\001\n\023GetWorkflowTempla" - + "te\0224.google.cloud.dataproc.v1.GetWorkflo" - + "wTemplateRequest\032*.google.cloud.dataproc" - + ".v1.WorkflowTemplate\"{\202\323\344\223\002n\0225/v1/{name=" - + "projects/*/locations/*/workflowTemplates" - + "/*}Z5\0223/v1/{name=projects/*/regions/*/wo" - + "rkflowTemplates/*}\332A\004name\022\325\002\n\033Instantiat" - + "eWorkflowTemplate\022<.google.cloud.datapro" - + "c.v1.InstantiateWorkflowTemplateRequest\032" - + "\035.google.longrunning.Operation\"\330\001\202\323\344\223\002\214\001" - + "\"A/v1/{name=projects/*/locations/*/workf" - + "lowTemplates/*}:instantiate:\001*ZD\"?/v1/{n" - + "ame=projects/*/regions/*/workflowTemplat" - + "es/*}:instantiate:\001*\332A\004name\332A\017name,param" - + "eters\312A)\n\025google.protobuf.Empty\022\020Workflo" - + "wMetadata\022\364\002\n!InstantiateInlineWorkflowT" - + "emplate\022B.google.cloud.dataproc.v1.Insta" - + "ntiateInlineWorkflowTemplateRequest\032\035.go" - + "ogle.longrunning.Operation\"\353\001\202\323\344\223\002\246\001\"G/v" - + "1/{parent=projects/*/locations/*}/workfl" - + "owTemplates:instantiateInline:\010templateZ" - + "Q\"E/v1/{parent=projects/*/regions/*}/wor" - + "kflowTemplates:instantiateInline:\010templa" - + "te\332A\017parent,template\312A)\n\025google.protobuf" - + ".Empty\022\020WorkflowMetadata\022\246\002\n\026UpdateWorkf" - + "lowTemplate\0227.google.cloud.dataproc.v1.U" - + 
"pdateWorkflowTemplateRequest\032*.google.cl" - + "oud.dataproc.v1.WorkflowTemplate\"\246\001\202\323\344\223\002" - + "\224\001\032>/v1/{template.name=projects/*/locati" - + "ons/*/workflowTemplates/*}:\010templateZH\032<" - + "/v1/{template.name=projects/*/regions/*/" - + "workflowTemplates/*}:\010template\332A\010templat" - + "e\022\207\002\n\025ListWorkflowTemplates\0226.google.clo" - + "ud.dataproc.v1.ListWorkflowTemplatesRequ" - + "est\0327.google.cloud.dataproc.v1.ListWorkf" - + "lowTemplatesResponse\"}\202\323\344\223\002n\0225/v1/{paren" - + "t=projects/*/locations/*}/workflowTempla" - + "tesZ5\0223/v1/{parent=projects/*/regions/*}" - + "/workflowTemplates\332A\006parent\022\346\001\n\026DeleteWo" - + "rkflowTemplate\0227.google.cloud.dataproc.v" - + "1.DeleteWorkflowTemplateRequest\032\026.google" - + ".protobuf.Empty\"{\202\323\344\223\002n*5/v1/{name=proje" - + "cts/*/locations/*/workflowTemplates/*}Z5" - + "*3/v1/{name=projects/*/regions/*/workflo" - + "wTemplates/*}\332A\004name\032K\312A\027dataproc.google" - + "apis.com\322A.https://www.googleapis.com/au" - + "th/cloud-platformBz\n\034com.google.cloud.da" - + "taproc.v1B\026WorkflowTemplatesProtoP\001Z@goo" - + "gle.golang.org/genproto/googleapis/cloud" - + "/dataproc/v1;dataprocb\006proto3" + + "ongrunning/operations.proto\032\036google/prot" + + "obuf/duration.proto\032\033google/protobuf/emp" + + "ty.proto\032\037google/protobuf/timestamp.prot" + + "o\"\202\006\n\020WorkflowTemplate\022\017\n\002id\030\002 \001(\tB\003\340A\002\022" + + "\021\n\004name\030\001 \001(\tB\003\340A\003\022\024\n\007version\030\003 \001(\005B\003\340A\001" + + "\0224\n\013create_time\030\004 \001(\0132\032.google.protobuf." 
+ + "TimestampB\003\340A\003\0224\n\013update_time\030\005 \001(\0132\032.go" + + "ogle.protobuf.TimestampB\003\340A\003\022K\n\006labels\030\006" + + " \003(\01326.google.cloud.dataproc.v1.Workflow" + + "Template.LabelsEntryB\003\340A\001\022K\n\tplacement\030\007" + + " \001(\01323.google.cloud.dataproc.v1.Workflow" + + "TemplatePlacementB\003\340A\002\0227\n\004jobs\030\010 \003(\0132$.g" + + "oogle.cloud.dataproc.v1.OrderedJobB\003\340A\002\022" + + "D\n\nparameters\030\t \003(\0132+.google.cloud.datap" + + "roc.v1.TemplateParameterB\003\340A\001\0223\n\013dag_tim" + + "eout\030\n \001(\0132\031.google.protobuf.DurationB\003\340" + + "A\001\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030" + + "\002 \001(\t:\0028\001:\312\001\352A\306\001\n(dataproc.googleapis.co" + + "m/WorkflowTemplate\022Iprojects/{project}/r" + + "egions/{region}/workflowTemplates/{workf" + + "low_template}\022Mprojects/{project}/locati" + + "ons/{location}/workflowTemplates/{workfl" + + "ow_template} \001\"\264\001\n\031WorkflowTemplatePlace" + + "ment\022C\n\017managed_cluster\030\001 \001(\0132(.google.c" + + "loud.dataproc.v1.ManagedClusterH\000\022E\n\020clu" + + "ster_selector\030\002 \001(\0132).google.cloud.datap" + + "roc.v1.ClusterSelectorH\000B\013\n\tplacement\"\343\001" + + "\n\016ManagedCluster\022\031\n\014cluster_name\030\002 \001(\tB\003" + + "\340A\002\022<\n\006config\030\003 \001(\0132\'.google.cloud.datap" + + "roc.v1.ClusterConfigB\003\340A\002\022I\n\006labels\030\004 \003(" + + "\01324.google.cloud.dataproc.v1.ManagedClus" + + "ter.LabelsEntryB\003\340A\001\032-\n\013LabelsEntry\022\013\n\003k" + + "ey\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\265\001\n\017ClusterS" + + "elector\022\021\n\004zone\030\001 \001(\tB\003\340A\001\022Y\n\016cluster_la" + + "bels\030\002 \003(\0132<.google.cloud.dataproc.v1.Cl" + + "usterSelector.ClusterLabelsEntryB\003\340A\002\0324\n" + + "\022ClusterLabelsEntry\022\013\n\003key\030\001 
\001(\t\022\r\n\005valu" + + "e\030\002 \001(\t:\0028\001\"\206\006\n\nOrderedJob\022\024\n\007step_id\030\001 " + + "\001(\tB\003\340A\002\022>\n\nhadoop_job\030\002 \001(\0132#.google.cl" + + "oud.dataproc.v1.HadoopJobB\003\340A\001H\000\022<\n\tspar" + + "k_job\030\003 \001(\0132\".google.cloud.dataproc.v1.S" + + "parkJobB\003\340A\001H\000\022@\n\013pyspark_job\030\004 \001(\0132$.go" + + "ogle.cloud.dataproc.v1.PySparkJobB\003\340A\001H\000" + + "\022:\n\010hive_job\030\005 \001(\0132!.google.cloud.datapr" + + "oc.v1.HiveJobB\003\340A\001H\000\0228\n\007pig_job\030\006 \001(\0132 ." + + "google.cloud.dataproc.v1.PigJobB\003\340A\001H\000\022?" + + "\n\013spark_r_job\030\013 \001(\0132#.google.cloud.datap" + + "roc.v1.SparkRJobB\003\340A\001H\000\022C\n\rspark_sql_job" + + "\030\007 \001(\0132%.google.cloud.dataproc.v1.SparkS" + + "qlJobB\003\340A\001H\000\022>\n\npresto_job\030\014 \001(\0132#.googl" + + "e.cloud.dataproc.v1.PrestoJobB\003\340A\001H\000\022E\n\006" + + "labels\030\010 \003(\01320.google.cloud.dataproc.v1." 
+ + "OrderedJob.LabelsEntryB\003\340A\001\022@\n\nschedulin" + + "g\030\t \001(\0132\'.google.cloud.dataproc.v1.JobSc" + + "hedulingB\003\340A\001\022\"\n\025prerequisite_step_ids\030\n" + + " \003(\tB\003\340A\001\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001B\n\n\010job_type\"\235\001\n\021Templat" + + "eParameter\022\021\n\004name\030\001 \001(\tB\003\340A\002\022\023\n\006fields\030" + + "\002 \003(\tB\003\340A\002\022\030\n\013description\030\003 \001(\tB\003\340A\001\022F\n\n" + + "validation\030\004 \001(\0132-.google.cloud.dataproc" + + ".v1.ParameterValidationB\003\340A\001\"\241\001\n\023Paramet" + + "erValidation\022:\n\005regex\030\001 \001(\0132).google.clo" + + "ud.dataproc.v1.RegexValidationH\000\022;\n\006valu" + + "es\030\002 \001(\0132).google.cloud.dataproc.v1.Valu" + + "eValidationH\000B\021\n\017validation_type\"\'\n\017Rege" + + "xValidation\022\024\n\007regexes\030\001 \003(\tB\003\340A\002\"&\n\017Val" + + "ueValidation\022\023\n\006values\030\001 \003(\tB\003\340A\002\"\324\006\n\020Wo" + + "rkflowMetadata\022\025\n\010template\030\001 \001(\tB\003\340A\003\022\024\n" + + "\007version\030\002 \001(\005B\003\340A\003\022G\n\016create_cluster\030\003 " + + "\001(\0132*.google.cloud.dataproc.v1.ClusterOp" + + "erationB\003\340A\003\022;\n\005graph\030\004 \001(\0132\'.google.clo" + + "ud.dataproc.v1.WorkflowGraphB\003\340A\003\022G\n\016del" + + "ete_cluster\030\005 \001(\0132*.google.cloud.datapro" + + "c.v1.ClusterOperationB\003\340A\003\022D\n\005state\030\006 \001(" + + "\01620.google.cloud.dataproc.v1.WorkflowMet" + + "adata.StateB\003\340A\003\022\031\n\014cluster_name\030\007 \001(\tB\003" + + "\340A\003\022N\n\nparameters\030\010 \003(\0132:.google.cloud.d" + + "ataproc.v1.WorkflowMetadata.ParametersEn" + + "try\0223\n\nstart_time\030\t \001(\0132\032.google.protobu" + + "f.TimestampB\003\340A\003\0221\n\010end_time\030\n \001(\0132\032.goo" + + 
"gle.protobuf.TimestampB\003\340A\003\022\031\n\014cluster_u" + + "uid\030\013 \001(\tB\003\340A\003\0223\n\013dag_timeout\030\014 \001(\0132\031.go" + + "ogle.protobuf.DurationB\003\340A\003\0227\n\016dag_start" + + "_time\030\r \001(\0132\032.google.protobuf.TimestampB" + + "\003\340A\003\0225\n\014dag_end_time\030\016 \001(\0132\032.google.prot" + + "obuf.TimestampB\003\340A\003\0321\n\017ParametersEntry\022\013" + + "\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"8\n\005State\022" + + "\013\n\007UNKNOWN\020\000\022\013\n\007PENDING\020\001\022\013\n\007RUNNING\020\002\022\010" + + "\n\004DONE\020\003\"T\n\020ClusterOperation\022\031\n\014operatio" + + "n_id\030\001 \001(\tB\003\340A\003\022\022\n\005error\030\002 \001(\tB\003\340A\003\022\021\n\004d" + + "one\030\003 \001(\010B\003\340A\003\"K\n\rWorkflowGraph\022:\n\005nodes" + + "\030\001 \003(\0132&.google.cloud.dataproc.v1.Workfl" + + "owNodeB\003\340A\003\"\243\002\n\014WorkflowNode\022\024\n\007step_id\030" + + "\001 \001(\tB\003\340A\003\022\"\n\025prerequisite_step_ids\030\002 \003(" + + "\tB\003\340A\003\022\023\n\006job_id\030\003 \001(\tB\003\340A\003\022D\n\005state\030\005 \001" + + "(\01620.google.cloud.dataproc.v1.WorkflowNo" + + "de.NodeStateB\003\340A\003\022\022\n\005error\030\006 \001(\tB\003\340A\003\"j\n" + + "\tNodeState\022\032\n\026NODE_STATE_UNSPECIFIED\020\000\022\013" + + "\n\007BLOCKED\020\001\022\014\n\010RUNNABLE\020\002\022\013\n\007RUNNING\020\003\022\r" + + "\n\tCOMPLETED\020\004\022\n\n\006FAILED\020\005\"\244\001\n\035CreateWork" + + "flowTemplateRequest\022@\n\006parent\030\001 \001(\tB0\340A\002" + + "\372A*\022(dataproc.googleapis.com/WorkflowTem" + + "plate\022A\n\010template\030\002 \001(\0132*.google.cloud.d" + + "ataproc.v1.WorkflowTemplateB\003\340A\002\"r\n\032GetW" + + "orkflowTemplateRequest\022>\n\004name\030\001 \001(\tB0\340A" + + "\002\372A*\n(dataproc.googleapis.com/WorkflowTe" + + "mplate\022\024\n\007version\030\002 
\001(\005B\003\340A\001\"\255\002\n\"Instant" + + "iateWorkflowTemplateRequest\022>\n\004name\030\001 \001(" + + "\tB0\340A\002\372A*\n(dataproc.googleapis.com/Workf" + + "lowTemplate\022\024\n\007version\030\002 \001(\005B\003\340A\001\022\027\n\nreq" + + "uest_id\030\005 \001(\tB\003\340A\001\022e\n\nparameters\030\006 \003(\0132L" + + ".google.cloud.dataproc.v1.InstantiateWor" + + "kflowTemplateRequest.ParametersEntryB\003\340A" + + "\001\0321\n\017ParametersEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005val" + + "ue\030\002 \001(\t:\0028\001\"\310\001\n(InstantiateInlineWorkfl" + + "owTemplateRequest\022@\n\006parent\030\001 \001(\tB0\340A\002\372A" + + "*\022(dataproc.googleapis.com/WorkflowTempl" + + "ate\022A\n\010template\030\002 \001(\0132*.google.cloud.dat" + + "aproc.v1.WorkflowTemplateB\003\340A\002\022\027\n\nreques" + + "t_id\030\003 \001(\tB\003\340A\001\"b\n\035UpdateWorkflowTemplat" + + "eRequest\022A\n\010template\030\001 \001(\0132*.google.clou" + + "d.dataproc.v1.WorkflowTemplateB\003\340A\002\"\221\001\n\034" + + "ListWorkflowTemplatesRequest\022@\n\006parent\030\001" + + " \001(\tB0\340A\002\372A*\022(dataproc.googleapis.com/Wo" + + "rkflowTemplate\022\026\n\tpage_size\030\002 \001(\005B\003\340A\001\022\027" + + "\n\npage_token\030\003 \001(\tB\003\340A\001\"\201\001\n\035ListWorkflow" + + "TemplatesResponse\022B\n\ttemplates\030\001 \003(\0132*.g" + + "oogle.cloud.dataproc.v1.WorkflowTemplate" + + "B\003\340A\003\022\034\n\017next_page_token\030\002 \001(\tB\003\340A\003\"u\n\035D" + + "eleteWorkflowTemplateRequest\022>\n\004name\030\001 \001" + + "(\tB0\340A\002\372A*\n(dataproc.googleapis.com/Work" + + "flowTemplate\022\024\n\007version\030\002 \001(\005B\003\340A\0012\346\020\n\027W" + + "orkflowTemplateService\022\233\002\n\026CreateWorkflo" + + "wTemplate\0227.google.cloud.dataproc.v1.Cre" + + "ateWorkflowTemplateRequest\032*.google.clou" + + "d.dataproc.v1.WorkflowTemplate\"\233\001\202\323\344\223\002\202\001" + + 
"\"5/v1/{parent=projects/*/locations/*}/wo" + + "rkflowTemplates:\010templateZ?\"3/v1/{parent" + + "=projects/*/regions/*}/workflowTemplates" + + ":\010template\332A\017parent,template\022\364\001\n\023GetWork" + + "flowTemplate\0224.google.cloud.dataproc.v1." + + "GetWorkflowTemplateRequest\032*.google.clou" + + "d.dataproc.v1.WorkflowTemplate\"{\202\323\344\223\002n\0225" + + "/v1/{name=projects/*/locations/*/workflo" + + "wTemplates/*}Z5\0223/v1/{name=projects/*/re" + + "gions/*/workflowTemplates/*}\332A\004name\022\325\002\n\033" + + "InstantiateWorkflowTemplate\022<.google.clo" + + "ud.dataproc.v1.InstantiateWorkflowTempla" + + "teRequest\032\035.google.longrunning.Operation" + + "\"\330\001\202\323\344\223\002\214\001\"A/v1/{name=projects/*/locatio" + + "ns/*/workflowTemplates/*}:instantiate:\001*" + + "ZD\"?/v1/{name=projects/*/regions/*/workf" + + "lowTemplates/*}:instantiate:\001*\332A\004name\332A\017" + + "name,parameters\312A)\n\025google.protobuf.Empt" + + "y\022\020WorkflowMetadata\022\364\002\n!InstantiateInlin" + + "eWorkflowTemplate\022B.google.cloud.datapro" + + "c.v1.InstantiateInlineWorkflowTemplateRe" + + "quest\032\035.google.longrunning.Operation\"\353\001\202" + + "\323\344\223\002\246\001\"G/v1/{parent=projects/*/locations" + + "/*}/workflowTemplates:instantiateInline:" + + "\010templateZQ\"E/v1/{parent=projects/*/regi" + + "ons/*}/workflowTemplates:instantiateInli" + + "ne:\010template\332A\017parent,template\312A)\n\025googl" + + "e.protobuf.Empty\022\020WorkflowMetadata\022\246\002\n\026U" + + "pdateWorkflowTemplate\0227.google.cloud.dat" + + "aproc.v1.UpdateWorkflowTemplateRequest\032*" + + ".google.cloud.dataproc.v1.WorkflowTempla" + + "te\"\246\001\202\323\344\223\002\224\001\032>/v1/{template.name=project" + + "s/*/locations/*/workflowTemplates/*}:\010te" + + "mplateZH\032 user_service_account_mapping = 1 [(google.api.field_behavior) = REQUIRED]; +} + // Specifies the selection and config of software inside the cluster. 
message SoftwareConfig { // Optional. The version of software inside the cluster. It must be one of the @@ -698,9 +853,9 @@ message SoftwareConfig { message LifecycleConfig { // Optional. The duration to keep the cluster alive while idling (when no jobs // are running). Passing this threshold will cause the cluster to be - // deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON + // deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON // representation of - // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json). + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). google.protobuf.Duration idle_delete_ttl = 1 [(google.api.field_behavior) = OPTIONAL]; // Either the exact time the cluster should be deleted at or @@ -724,6 +879,21 @@ message LifecycleConfig { google.protobuf.Timestamp idle_start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; } +// Specifies a Metastore configuration. +message MetastoreConfig { + // Required. Resource name of an existing Dataproc Metastore service. + // + // Example: + // + // * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]` + string dataproc_metastore_service = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "metastore.googleapis.com/Service" + } + ]; +} + // Contains cluster daemon metrics, such as HDFS and YARN stats. // // **Beta Feature**: This report is available for testing purposes only. It may @@ -748,9 +918,9 @@ message CreateClusterRequest { // Required. The cluster to create. Cluster cluster = 2 [(google.api.field_behavior) = REQUIRED]; - // Optional. A unique id used to identify the request. If the server - // receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests with the same - // id, then the second request will be ignored and the + // Optional. A unique id used to identify the request. 
If the server receives two + // [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s + // with the same id, then the second request will be ignored and the // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend // is returned. // @@ -842,8 +1012,9 @@ message UpdateClusterRequest { google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = REQUIRED]; // Optional. A unique id used to identify the request. If the server - // receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests with the same - // id, then the second request will be ignored and the + // receives two + // [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s + // with the same id, then the second request will be ignored and the // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the // backend is returned. // @@ -855,6 +1026,68 @@ message UpdateClusterRequest { string request_id = 7 [(google.api.field_behavior) = OPTIONAL]; } +// A request to stop a cluster. +message StopClusterRequest { + // Required. The ID of the Google Cloud Platform project the + // cluster belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The cluster name. + string cluster_name = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Specifying the `cluster_uuid` means the RPC will fail + // (with error NOT_FOUND) if a cluster with the specified UUID does not exist. + string cluster_uuid = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A unique id used to identify the request. 
If the server + // receives two + // [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s + // with the same id, then the second request will be ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the + // backend is returned. + // + // Recommendation: Set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The id must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to start a cluster. +message StartClusterRequest { + // Required. The ID of the Google Cloud Platform project the + // cluster belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The cluster name. + string cluster_name = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Specifying the `cluster_uuid` means the RPC will fail + // (with error NOT_FOUND) if a cluster with the specified UUID does not exist. + string cluster_uuid = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A unique id used to identify the request. If the server + // receives two + // [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s + // with the same id, then the second request will be ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the + // backend is returned. + // + // Recommendation: Set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). 
+ // + // The id must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + // A request to delete a cluster. message DeleteClusterRequest { // Required. The ID of the Google Cloud Platform project that the cluster @@ -872,8 +1105,9 @@ message DeleteClusterRequest { string cluster_uuid = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. A unique id used to identify the request. If the server - // receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests with the same - // id, then the second request will be ignored and the + // receives two + // [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s + // with the same id, then the second request will be ignored and the // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the // backend is returned. // diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto index 065530f3..f3521b5d 100644 --- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto +++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto @@ -32,7 +32,8 @@ option java_package = "com.google.cloud.dataproc.v1"; // The JobController provides methods to manage jobs. service JobController { option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; // Submits a job to a cluster. 
rpc SubmitJob(SubmitJobRequest) returns (Job) { @@ -44,7 +45,8 @@ service JobController { } // Submits job to a cluster. - rpc SubmitJobAsOperation(SubmitJobRequest) returns (google.longrunning.Operation) { + rpc SubmitJobAsOperation(SubmitJobRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/jobs:submitAsOperation" body: "*" @@ -286,9 +288,9 @@ message PySparkJob { // A list of queries to run on a cluster. message QueryList { - // Required. The queries to execute. You do not need to terminate a query - // with a semicolon. Multiple queries can be specified in one string - // by separating each with a semicolon. Here is an example of an Cloud + // Required. The queries to execute. You do not need to end a query expression + // with a semicolon. Multiple queries can be specified in one + // string by separating each with a semicolon. Here is an example of a // Dataproc API snippet that uses a QueryList to specify a HiveJob: // // "hiveJob": { @@ -323,7 +325,8 @@ message HiveJob { // Optional. Mapping of query variable names to values (equivalent to the // Hive command: `SET name="value";`). - map script_variables = 4 [(google.api.field_behavior) = OPTIONAL]; + map script_variables = 4 + [(google.api.field_behavior) = OPTIONAL]; // Optional. A mapping of property names and values, used to configure Hive. // Properties that conflict with values set by the Dataproc API may be @@ -352,7 +355,8 @@ message SparkSqlJob { // Optional. Mapping of query variable names to values (equivalent to the // Spark SQL command: SET `name="value";`). - map script_variables = 3 [(google.api.field_behavior) = OPTIONAL]; + map script_variables = 3 + [(google.api.field_behavior) = OPTIONAL]; // Optional. A mapping of property names to values, used to configure // Spark SQL's SparkConf. Properties that conflict with values set by the @@ -386,7 +390,8 @@ message PigJob { // Optional. 
Mapping of query variable names to values (equivalent to the Pig // command: `name=[value]`). - map script_variables = 4 [(google.api.field_behavior) = OPTIONAL]; + map script_variables = 4 + [(google.api.field_behavior) = OPTIONAL]; // Optional. A mapping of property names to values, used to configure Pig. // Properties that conflict with values set by the Dataproc API may be @@ -479,6 +484,11 @@ message JobPlacement { // Output only. A cluster UUID generated by the Dataproc service when // the job is submitted. string cluster_uuid = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Cluster labels to identify a cluster where the job will be + // submitted. + map cluster_labels = 3 + [(google.api.field_behavior) = OPTIONAL]; } // Dataproc job status. @@ -557,7 +567,8 @@ message JobStatus { ]; // Output only. The time when this state was entered. - google.protobuf.Timestamp state_start_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp state_start_time = 6 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Additional state information, which includes // status reported by the agent. @@ -566,8 +577,8 @@ message JobStatus { // Encapsulates the full scoping used to reference a job. message JobReference { - // Optional. The ID of the Google Cloud Platform project that the job belongs to. If - // specified, must match the request project ID. + // Optional. The ID of the Google Cloud Platform project that the job belongs + // to. If specified, must match the request project ID. string project_id = 1 [(google.api.field_behavior) = OPTIONAL]; // Optional. The job ID, which must be unique within the project. @@ -677,22 +688,26 @@ message Job { JobStatus status = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The previous job status. 
- repeated JobStatus status_history = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated JobStatus status_history = 13 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The collection of YARN applications spun up by this job. // // **Beta** Feature: This report is available for testing purposes only. It // may be changed before final release. - repeated YarnApplication yarn_applications = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated YarnApplication yarn_applications = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. A URI pointing to the location of the stdout of the job's // driver program. - string driver_output_resource_uri = 17 [(google.api.field_behavior) = OUTPUT_ONLY]; + string driver_output_resource_uri = 17 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. If present, the location of miscellaneous control files // which may be used as part of job setup and handling. If not present, // control files may be placed in the same location as `driver_output_uri`. - string driver_control_files_uri = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; + string driver_control_files_uri = 15 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Optional. The labels to associate with this job. // Label **keys** must contain 1 to 63 characters, and must conform to @@ -711,8 +726,8 @@ message Job { // may be reused over time. string job_uuid = 22 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Output only. Indicates whether the job is completed. If the value is `false`, - // the job is still in progress. If `true`, the job is completed, and + // Output only. Indicates whether the job is completed. If the value is + // `false`, the job is still in progress. If `true`, the job is completed, and // `status.state` field will indicate if it was successful, failed, // or cancelled. bool done = 24 [(google.api.field_behavior) = OUTPUT_ONLY]; @@ -721,7 +736,7 @@ message Job { // Job scheduling options. 
message JobScheduling { // Optional. Maximum number of times per hour a driver may be restarted as - // a result of driver terminating with non-zero code before job is + // a result of driver exiting with non-zero code before job is // reported failed. // // A job may be reported as thrashing if driver exits with non-zero code @@ -729,6 +744,11 @@ message JobScheduling { // // Maximum value is 10. int32 max_failures_per_hour = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Maximum number of times in total a driver may be restarted as a + // result of driver exiting with non-zero code before job is reported failed. + // Maximum value is 240. + int32 max_failures_total = 2 [(google.api.field_behavior) = OPTIONAL]; } // A request to submit a job. @@ -744,8 +764,9 @@ message SubmitJobRequest { Job job = 2 [(google.api.field_behavior) = REQUIRED]; // Optional. A unique id used to identify the request. If the server - // receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests with the same - // id, then the second request will be ignored and the + // receives two + // [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s + // with the same id, then the second request will be ignored and the // first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend // is returned. // @@ -769,7 +790,8 @@ message JobMetadata { string operation_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Job submission time. - google.protobuf.Timestamp start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp start_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; } // A request to get the resource representation for a job in a project. @@ -822,7 +844,8 @@ message ListJobsRequest { // (default = match ALL jobs). // // If `filter` is provided, `jobStateMatcher` will be ignored. 
- JobStateMatcher job_state_matcher = 5 [(google.api.field_behavior) = OPTIONAL]; + JobStateMatcher job_state_matcher = 5 + [(google.api.field_behavior) = OPTIONAL]; // Optional. A filter constraining the jobs to list. Filters are // case-sensitive and have the following syntax: @@ -862,7 +885,8 @@ message UpdateJobRequest { // labels, and the `PATCH` request body would specify the new // value. Note: Currently, labels is the only // field that can be updated. - google.protobuf.FieldMask update_mask = 5 [(google.api.field_behavior) = REQUIRED]; + google.protobuf.FieldMask update_mask = 5 + [(google.api.field_behavior) = REQUIRED]; } // A list of jobs in a project. diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto index 7a1382f1..176e4535 100644 --- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto +++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto @@ -17,6 +17,7 @@ syntax = "proto3"; package google.cloud.dataproc.v1; import "google/api/annotations.proto"; +import "google/api/field_behavior.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; option java_multiple_files = true; @@ -24,13 +25,30 @@ option java_outer_classname = "SharedProto"; option java_package = "com.google.cloud.dataproc.v1"; // Cluster components that can be activated. +// Next ID: 16. enum Component { // Unspecified component. Specifying this will cause Cluster creation to fail. COMPONENT_UNSPECIFIED = 0; - // The Anaconda python distribution. + // The Anaconda python distribution. The Anaconda component is not supported + // in the Dataproc + // 2.0 + // image. The 2.0 image is pre-installed with Miniconda. ANACONDA = 5; + // Docker + DOCKER = 13; + + // The Druid query engine. (alpha) + DRUID = 9; + + // Flink + FLINK = 14; + + // HBase. 
(beta) + HBASE = 11; + // The Hive Web HCatalog (the REST service for accessing HCatalog). HIVE_WEBHCAT = 3; @@ -40,6 +58,12 @@ enum Component { // The Presto query engine. PRESTO = 6; + // The Ranger service. + RANGER = 12; + + // The Solr service. + SOLR = 10; + // The Zeppelin notebook. ZEPPELIN = 4; diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto index 04f81004..ea5bfd05 100644 --- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto +++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto @@ -23,6 +23,7 @@ import "google/api/resource.proto"; import "google/cloud/dataproc/v1/clusters.proto"; import "google/cloud/dataproc/v1/jobs.proto"; import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; @@ -35,10 +36,12 @@ option java_package = "com.google.cloud.dataproc.v1"; // Dataproc API. service WorkflowTemplateService { option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; // Creates new workflow template. - rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest) returns (WorkflowTemplate) { + rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest) + returns (WorkflowTemplate) { option (google.api.http) = { post: "/v1/{parent=projects/*/locations/*}/workflowTemplates" body: "template" @@ -54,7 +57,8 @@ service WorkflowTemplateService { // // Can retrieve previously instantiated template by specifying optional // version parameter. 
- rpc GetWorkflowTemplate(GetWorkflowTemplateRequest) returns (WorkflowTemplate) { + rpc GetWorkflowTemplate(GetWorkflowTemplateRequest) + returns (WorkflowTemplate) { option (google.api.http) = { get: "/v1/{name=projects/*/locations/*/workflowTemplates/*}" additional_bindings { @@ -84,7 +88,8 @@ service WorkflowTemplateService { // On successful completion, // [Operation.response][google.longrunning.Operation.response] will be // [Empty][google.protobuf.Empty]. - rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest) returns (google.longrunning.Operation) { + rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/{name=projects/*/locations/*/workflowTemplates/*}:instantiate" body: "*" @@ -104,7 +109,8 @@ service WorkflowTemplateService { // Instantiates a template and begins execution. // // This method is equivalent to executing the sequence - // [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], + // [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], + // [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], // [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. // // The returned Operation can be used to track execution of @@ -125,7 +131,9 @@ service WorkflowTemplateService { // On successful completion, // [Operation.response][google.longrunning.Operation.response] will be // [Empty][google.protobuf.Empty]. 
- rpc InstantiateInlineWorkflowTemplate(InstantiateInlineWorkflowTemplateRequest) returns (google.longrunning.Operation) { + rpc InstantiateInlineWorkflowTemplate( + InstantiateInlineWorkflowTemplateRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/{parent=projects/*/locations/*}/workflowTemplates:instantiateInline" body: "template" @@ -143,7 +151,8 @@ service WorkflowTemplateService { // Updates (replaces) workflow template. The updated template // must contain version that matches the current server version. - rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest) returns (WorkflowTemplate) { + rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest) + returns (WorkflowTemplate) { option (google.api.http) = { put: "/v1/{template.name=projects/*/locations/*/workflowTemplates/*}" body: "template" @@ -156,7 +165,8 @@ service WorkflowTemplateService { } // Lists workflows that match the specified filter in the request. - rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest) returns (ListWorkflowTemplatesResponse) { + rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest) + returns (ListWorkflowTemplatesResponse) { option (google.api.http) = { get: "/v1/{parent=projects/*/locations/*}/workflowTemplates" additional_bindings { @@ -167,7 +177,8 @@ service WorkflowTemplateService { } // Deletes a workflow template. It does not cancel in-progress workflows. - rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest) returns (google.protobuf.Empty) { + rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest) + returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v1/{name=projects/*/locations/*/workflowTemplates/*}" additional_bindings { @@ -213,10 +224,12 @@ message WorkflowTemplate { int32 version = 3 [(google.api.field_behavior) = OPTIONAL]; // Output only. The time template was created. 
- google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp create_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The time template was last updated. - google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp update_time = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Optional. The labels to associate with this template. These labels // will be propagated to all jobs and clusters created by the workflow @@ -233,7 +246,8 @@ message WorkflowTemplate { map labels = 6 [(google.api.field_behavior) = OPTIONAL]; // Required. WorkflowTemplate scheduling information. - WorkflowTemplatePlacement placement = 7 [(google.api.field_behavior) = REQUIRED]; + WorkflowTemplatePlacement placement = 7 + [(google.api.field_behavior) = REQUIRED]; // Required. The Directed Acyclic Graph of Jobs to submit. repeated OrderedJob jobs = 8 [(google.api.field_behavior) = REQUIRED]; @@ -241,7 +255,21 @@ message WorkflowTemplate { // Optional. Template parameters whose values are substituted into the // template. Values for parameters must be provided when the template is // instantiated. - repeated TemplateParameter parameters = 9 [(google.api.field_behavior) = OPTIONAL]; + repeated TemplateParameter parameters = 9 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Timeout duration for the DAG of jobs, expressed in seconds (see + // [JSON representation of + // duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + // The timeout duration must be from 10 minutes ("600s") to 24 hours + // ("86400s"). The timer begins when the first job is submitted. 
If the + // workflow is running at the end of the timeout period, any remaining jobs + // are cancelled, the workflow is ended, and if the workflow was running on a + // [managed + // cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), + // the cluster is deleted. + google.protobuf.Duration dag_timeout = 10 + [(google.api.field_behavior) = OPTIONAL]; } // Specifies workflow execution target. @@ -299,7 +327,8 @@ message ClusterSelector { // Required. The cluster labels. Cluster must have all labels // to match. - map cluster_labels = 2 [(google.api.field_behavior) = REQUIRED]; + map cluster_labels = 2 + [(google.api.field_behavior) = REQUIRED]; } // A job executed by the workflow. @@ -309,8 +338,8 @@ message OrderedJob { // // The step id is used as prefix for job id, as job // `goog-dataproc-workflow-step-id` label, and in - // [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - // steps. + // [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + // field from other steps. // // The id must contain only letters (a-z, A-Z), numbers (0-9), // underscores (_), and hyphens (-). Cannot begin or end with underscore @@ -361,7 +390,8 @@ message OrderedJob { // Optional. The optional list of prerequisite job step_ids. // If not specified, the job will start at the beginning of workflow. - repeated string prerequisite_step_ids = 10 [(google.api.field_behavior) = OPTIONAL]; + repeated string prerequisite_step_ids = 10 + [(google.api.field_behavior) = OPTIONAL]; } // A configurable parameter that replaces one or more fields in the template. @@ -387,10 +417,10 @@ message TemplateParameter { // A field is allowed to appear in at most one parameter's list of field // paths. // - // A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
- // For example, a field path that references the zone field of a workflow - // template's cluster selector would be specified as - // `placement.clusterSelector.zone`. + // A field path is similar in syntax to a + // [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a + // field path that references the zone field of a workflow template's cluster + // selector would be specified as `placement.clusterSelector.zone`. // // Also, field paths can reference fields using the following syntax: // @@ -497,13 +527,15 @@ message WorkflowMetadata { int32 version = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The create cluster operation metadata. - ClusterOperation create_cluster = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + ClusterOperation create_cluster = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The workflow graph. WorkflowGraph graph = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The delete cluster operation metadata. - ClusterOperation delete_cluster = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + ClusterOperation delete_cluster = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The workflow state. State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; @@ -515,13 +547,33 @@ message WorkflowMetadata { map parameters = 8; // Output only. Workflow start time. - google.protobuf.Timestamp start_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp start_time = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Workflow end time. - google.protobuf.Timestamp end_time = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp end_time = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The UUID of target cluster. string cluster_uuid = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
The timeout duration for the DAG of jobs, expressed in seconds + // (see [JSON representation of + // duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Duration dag_timeout = 12 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. DAG start time, only set for workflows with + // [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when + // DAG begins. + google.protobuf.Timestamp dag_start_time = 13 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. DAG end time, only set for workflows with + // [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when + // DAG ends. + google.protobuf.Timestamp dag_end_time = 14 + [(google.api.field_behavior) = OUTPUT_ONLY]; } // The cluster operation triggered by a workflow. @@ -570,7 +622,8 @@ message WorkflowNode { string step_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Node's prerequisite nodes. - repeated string prerequisite_step_ids = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated string prerequisite_step_ids = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The job id; populated after the node enters RUNNING state. string job_id = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; @@ -670,7 +723,7 @@ message InstantiateWorkflowTemplateRequest { string request_id = 5 [(google.api.field_behavior) = OPTIONAL]; // Optional. Map from parameter names to values that should be used for those - // parameters. Values may not exceed 100 characters. + // parameters. Values may not exceed 1000 characters. map parameters = 6 [(google.api.field_behavior) = OPTIONAL]; } @@ -746,7 +799,8 @@ message ListWorkflowTemplatesRequest { // A response to a request to list workflow templates in a project. message ListWorkflowTemplatesResponse { // Output only. WorkflowTemplates list. 
- repeated WorkflowTemplate templates = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated WorkflowTemplate templates = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. This token is included in the response if there are more // results to fetch. To fetch additional results, provide this value as the diff --git a/synth.metadata b/synth.metadata index f5ba3211..64fb85c3 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,23 +4,23 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/java-dataproc.git", - "sha": "c3213c6844ec26fc2ebb55e56ee6753d56d5542d" + "sha": "a208e54b7d7634d1ee2de3470f7c2a3e7b0180c6" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "1305ca41d554eb0725237561e34129373bb8cbc1", - "internalRef": "362856902" + "sha": "439f098cb730af18c8ae4e65971ea3badf45138a", + "internalRef": "367518225" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "1305ca41d554eb0725237561e34129373bb8cbc1", - "internalRef": "362856902" + "sha": "439f098cb730af18c8ae4e65971ea3badf45138a", + "internalRef": "367518225" } }, { @@ -223,6 +223,7 @@ "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterMetrics.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterMetricsOrBuilder.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterName.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOperation.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOperationMetadata.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOperationMetadataOrBuilder.java", @@ -270,10 +271,14 @@ 
"proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetJobRequestOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetWorkflowTemplateRequest.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetWorkflowTemplateRequestOrBuilder.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfig.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfigOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HadoopJob.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HadoopJobOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HiveJob.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HiveJobOrBuilder.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/IdentityConfig.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/IdentityConfigOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupAutoscalingPolicyConfig.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupAutoscalingPolicyConfigOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java", @@ -322,6 +327,10 @@ "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ManagedClusterOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ManagedGroupConfig.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ManagedGroupConfigOrBuilder.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/MetastoreConfig.java", + 
"proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/MetastoreConfigOrBuilder.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeGroupAffinity.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeGroupAffinityOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeInitializationAction.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeInitializationActionOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OperationsProto.java", @@ -344,7 +353,10 @@ "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ReservationAffinityOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SecurityConfig.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SecurityConfigOrBuilder.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ServiceName.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SharedProto.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ShieldedInstanceConfig.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ShieldedInstanceConfigOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfig.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfigOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkJob.java", @@ -353,6 +365,10 @@ "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJobOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlJob.java", 
"proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlJobOrBuilder.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StartClusterRequest.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StartClusterRequestOrBuilder.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StopClusterRequest.java", + "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/StopClusterRequestOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequest.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequestOrBuilder.java", "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameter.java",