From 4556a3fc0de8e432c0abd12b5ee0d6b013448b36 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 21 Oct 2021 17:09:27 -0400 Subject: [PATCH] feat: Add support for dataproc BatchController service (#719) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Add support for dataproc BatchController service PiperOrigin-RevId: 404333740 Source-Link: https://github.com/googleapis/googleapis/commit/5088bd7856a92022789c1d66f6fb9fe3252868ad Source-Link: https://github.com/googleapis/googleapis-gen/commit/44b870783c1943771bcdec748ae4d5208c3d7f0e Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNDRiODcwNzgzYzE5NDM3NzFiY2RlYzc0OGFlNGQ1MjA4YzNkN2YwZSJ9 * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../dataproc/v1/BatchControllerClient.java | 732 ++ .../dataproc/v1/BatchControllerSettings.java | 232 + .../cloud/dataproc/v1/gapic_metadata.json | 21 + .../cloud/dataproc/v1/package-info.java | 13 + .../dataproc/v1/stub/BatchControllerStub.java | 76 + .../v1/stub/BatchControllerStubSettings.java | 448 ++ .../GrpcBatchControllerCallableFactory.java | 113 + .../v1/stub/GrpcBatchControllerStub.java | 277 + .../v1/BatchControllerClientTest.java | 468 ++ .../dataproc/v1/MockBatchController.java | 59 + .../dataproc/v1/MockBatchControllerImpl.java | 143 + .../dataproc/v1/BatchControllerGrpc.java | 693 ++ .../com/google/cloud/dataproc/v1/Batch.java | 6770 +++++++++++++++++ .../google/cloud/dataproc/v1/BatchName.java | 223 + .../cloud/dataproc/v1/BatchOrBuilder.java | 694 ++ .../cloud/dataproc/v1/BatchesProto.java | 357 + .../cloud/dataproc/v1/CreateBatchRequest.java | 1364 ++++ .../v1/CreateBatchRequestOrBuilder.java | 164 + .../cloud/dataproc/v1/DeleteBatchRequest.java | 649 ++ .../v1/DeleteBatchRequestOrBuilder.java | 54 + .../cloud/dataproc/v1/GetBatchRequest.java | 648 ++ .../dataproc/v1/GetBatchRequestOrBuilder.java | 54 + .../cloud/dataproc/v1/ListBatchesRequest.java | 933 +++ .../v1/ListBatchesRequestOrBuilder.java | 96 + .../dataproc/v1/ListBatchesResponse.java | 1133 +++ .../v1/ListBatchesResponseOrBuilder.java | 103 + .../cloud/dataproc/v1/PySparkBatch.java | 2122 ++++++ .../dataproc/v1/PySparkBatchOrBuilder.java | 335 + .../google/cloud/dataproc/v1/SparkBatch.java | 2198 ++++++ .../dataproc/v1/SparkBatchOrBuilder.java | 332 + .../google/cloud/dataproc/v1/SparkRBatch.java | 1541 ++++ .../dataproc/v1/SparkRBatchOrBuilder.java | 225 + .../cloud/dataproc/v1/SparkSqlBatch.java | 1275 ++++ .../dataproc/v1/SparkSqlBatchOrBuilder.java | 165 + .../google/cloud/dataproc/v1/batches.proto | 372 + 35 files changed, 25082 insertions(+) create mode 100644 google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/BatchControllerClient.java create mode 100644 google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/BatchControllerSettings.java create mode 100644 google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/BatchControllerStub.java create mode 100644 google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/BatchControllerStubSettings.java create mode 100644 google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/GrpcBatchControllerCallableFactory.java create mode 100644 google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/GrpcBatchControllerStub.java create mode 100644 
google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/BatchControllerClientTest.java create mode 100644 google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/MockBatchController.java create mode 100644 google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/MockBatchControllerImpl.java create mode 100644 grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchControllerGrpc.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Batch.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchName.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchesProto.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateBatchRequest.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateBatchRequestOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteBatchRequest.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteBatchRequestOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetBatchRequest.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetBatchRequestOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesRequest.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesRequestOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesResponse.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesResponseOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkBatch.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkBatchOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkBatch.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkBatchOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRBatch.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRBatchOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlBatch.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlBatchOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/batches.proto diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/BatchControllerClient.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/BatchControllerClient.java new file mode 100644 index 00000000..c438bdd1 --- /dev/null +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/BatchControllerClient.java @@ -0,0 +1,732 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.dataproc.v1; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.AbstractFixedSizeCollection; +import com.google.api.gax.paging.AbstractPage; +import com.google.api.gax.paging.AbstractPagedListResponse; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1.stub.BatchControllerStub; +import com.google.cloud.dataproc.v1.stub.BatchControllerStubSettings; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.longrunning.Operation; +import com.google.longrunning.OperationsClient; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: The BatchController provides methods to manage batch workloads. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

{@code
+ * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+ *   BatchName name = BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]");
+ *   Batch response = batchControllerClient.getBatch(name);
+ * }
+ * }
+ * + *
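+ * Synchronous calls surface failures as com.google.api.gax.rpc.ApiException. A minimal,
+ * illustrative sketch of catching one (the status-code handling shown is an assumption about
+ * your error policy, not a recommendation):
+ *
+ * {@code
+ * // Assumes: import com.google.api.gax.rpc.ApiException;
+ * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+ *   BatchName name = BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]");
+ *   try {
+ *     Batch response = batchControllerClient.getBatch(name);
+ *   } catch (ApiException e) {
+ *     // The gRPC status is available on the exception, e.g. NOT_FOUND for a missing batch.
+ *     System.err.println("getBatch failed: " + e.getStatusCode().getCode());
+ *   }
+ * }
+ * }
+ *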

Note: close() needs to be called on the BatchControllerClient object to clean up resources + * such as threads. In the example above, try-with-resources is used, which automatically calls + * close(). + * + *

The surface of this class includes several types of Java methods for each of the API's + * methods: + * + *

    + *
  1. A "flattened" method. With this type of method, the fields of the request type have been + * converted into function parameters. It may be the case that not all fields are available as + * parameters, and not every API method will have a flattened method entry point. + *
  2. A "request object" method. This type of method only takes one parameter, a request object, + * which must be constructed before the call. Not every API method will have a request object + * method. + *
  3. A "callable" method. This type of method takes no parameters and returns an immutable API + * callable object, which can be used to initiate calls to the service. + *
+ * + *

See the individual methods for example code. + * + *
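+ * For instance, the same getBatch call in each of the three styles (a combined sketch for
+ * illustration; each variant also appears in its own method-level sample below):
+ *
+ * {@code
+ * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+ *   // 1. Flattened method: request fields become parameters.
+ *   Batch flattened =
+ *       batchControllerClient.getBatch(BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]"));
+ *   // 2. Request object method: build the request explicitly.
+ *   GetBatchRequest request =
+ *       GetBatchRequest.newBuilder()
+ *           .setName(BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]").toString())
+ *           .build();
+ *   Batch fromRequest = batchControllerClient.getBatch(request);
+ *   // 3. Callable method: returns a future for asynchronous use.
+ *   ApiFuture<Batch> future = batchControllerClient.getBatchCallable().futureCall(request);
+ *   Batch fromFuture = future.get();
+ * }
+ * }
+ *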

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *
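+ * For example, BatchName (generated in this package) follows the usual resource-name surface,
+ * so the round trip looks like this:
+ *
+ * {@code
+ * // Build a formatted resource name from its components.
+ * String name = BatchName.format("[PROJECT]", "[LOCATION]", "[BATCH]");
+ * // Parse a name returned by the service back into its identifiers.
+ * BatchName parsed = BatchName.parse(name);
+ * String project = parsed.getProject();
+ * String location = parsed.getLocation();
+ * String batch = parsed.getBatch();
+ * }
+ *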

This class can be customized by passing in a custom instance of BatchControllerSettings to + * create(). For example: + * + *

To customize credentials: + * + *

{@code
+ * BatchControllerSettings batchControllerSettings =
+ *     BatchControllerSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * BatchControllerClient batchControllerClient =
+ *     BatchControllerClient.create(batchControllerSettings);
+ * }
+ * + *

To customize the endpoint: + * + *

{@code
+ * BatchControllerSettings batchControllerSettings =
+ *     BatchControllerSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * BatchControllerClient batchControllerClient =
+ *     BatchControllerClient.create(batchControllerSettings);
+ * }
+ * + *

Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@Generated("by gapic-generator-java") +public class BatchControllerClient implements BackgroundResource { + private final BatchControllerSettings settings; + private final BatchControllerStub stub; + private final OperationsClient operationsClient; + + /** Constructs an instance of BatchControllerClient with default settings. */ + public static final BatchControllerClient create() throws IOException { + return create(BatchControllerSettings.newBuilder().build()); + } + + /** + * Constructs an instance of BatchControllerClient, using the given settings. The channels are + * created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final BatchControllerClient create(BatchControllerSettings settings) + throws IOException { + return new BatchControllerClient(settings); + } + + /** + * Constructs an instance of BatchControllerClient, using the given stub for making calls. This is + * for advanced usage - prefer using create(BatchControllerSettings). + */ + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public static final BatchControllerClient create(BatchControllerStub stub) { + return new BatchControllerClient(stub); + } + + /** + * Constructs an instance of BatchControllerClient, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected BatchControllerClient(BatchControllerSettings settings) throws IOException { + this.settings = settings; + this.stub = ((BatchControllerStubSettings) settings.getStubSettings()).createStub(); + this.operationsClient = OperationsClient.create(this.stub.getOperationsStub()); + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + protected BatchControllerClient(BatchControllerStub stub) { + this.settings = null; + this.stub = stub; + this.operationsClient = OperationsClient.create(this.stub.getOperationsStub()); + } + + public final BatchControllerSettings getSettings() { + return settings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public BatchControllerStub getStub() { + return stub; + } + + /** + * Returns the OperationsClient that can be used to query the status of a long-running operation + * returned by another API method call. + */ + public final OperationsClient getOperationsClient() { + return operationsClient; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a batch workload that executes asynchronously. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]");
+   *   Batch batch = Batch.newBuilder().build();
+   *   String batchId = "batchId-331744779";
+   *   Batch response = batchControllerClient.createBatchAsync(parent, batch, batchId).get();
+   * }
+   * }
+ * + * @param parent Required. The parent resource where this batch will be created. + * @param batch Required. The batch to create. + * @param batchId Optional. The ID to use for the batch, which will become the final component of + * the batch's resource name. + *

This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createBatchAsync( + LocationName parent, Batch batch, String batchId) { + CreateBatchRequest request = + CreateBatchRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setBatch(batch) + .setBatchId(batchId) + .build(); + return createBatchAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a batch workload that executes asynchronously. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   String parent = LocationName.of("[PROJECT]", "[LOCATION]").toString();
+   *   Batch batch = Batch.newBuilder().build();
+   *   String batchId = "batchId-331744779";
+   *   Batch response = batchControllerClient.createBatchAsync(parent, batch, batchId).get();
+   * }
+   * }
+ * + * @param parent Required. The parent resource where this batch will be created. + * @param batch Required. The batch to create. + * @param batchId Optional. The ID to use for the batch, which will become the final component of + * the batch's resource name. + *

This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createBatchAsync( + String parent, Batch batch, String batchId) { + CreateBatchRequest request = + CreateBatchRequest.newBuilder() + .setParent(parent) + .setBatch(batch) + .setBatchId(batchId) + .build(); + return createBatchAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a batch workload that executes asynchronously. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   CreateBatchRequest request =
+   *       CreateBatchRequest.newBuilder()
+   *           .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+   *           .setBatch(Batch.newBuilder().build())
+   *           .setBatchId("batchId-331744779")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   Batch response = batchControllerClient.createBatchAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createBatchAsync( + CreateBatchRequest request) { + return createBatchOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a batch workload that executes asynchronously. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   CreateBatchRequest request =
+   *       CreateBatchRequest.newBuilder()
+   *           .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+   *           .setBatch(Batch.newBuilder().build())
+   *           .setBatchId("batchId-331744779")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   OperationFuture<Batch, BatchOperationMetadata> future =
+   *       batchControllerClient.createBatchOperationCallable().futureCall(request);
+   *   // Do something.
+   *   Batch response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable + createBatchOperationCallable() { + return stub.createBatchOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a batch workload that executes asynchronously. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   CreateBatchRequest request =
+   *       CreateBatchRequest.newBuilder()
+   *           .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+   *           .setBatch(Batch.newBuilder().build())
+   *           .setBatchId("batchId-331744779")
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture<Operation> future = batchControllerClient.createBatchCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable createBatchCallable() { + return stub.createBatchCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the batch workload resource representation. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   BatchName name = BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]");
+   *   Batch response = batchControllerClient.getBatch(name);
+   * }
+   * }
+ * + * @param name Required. The name of the batch to retrieve. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Batch getBatch(BatchName name) { + GetBatchRequest request = + GetBatchRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getBatch(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the batch workload resource representation. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   String name = BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]").toString();
+   *   Batch response = batchControllerClient.getBatch(name);
+   * }
+   * }
+ * + * @param name Required. The name of the batch to retrieve. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Batch getBatch(String name) { + GetBatchRequest request = GetBatchRequest.newBuilder().setName(name).build(); + return getBatch(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the batch workload resource representation. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   GetBatchRequest request =
+   *       GetBatchRequest.newBuilder()
+   *           .setName(BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]").toString())
+   *           .build();
+   *   Batch response = batchControllerClient.getBatch(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Batch getBatch(GetBatchRequest request) { + return getBatchCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the batch workload resource representation. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   GetBatchRequest request =
+   *       GetBatchRequest.newBuilder()
+   *           .setName(BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]").toString())
+   *           .build();
+   *   ApiFuture<Batch> future = batchControllerClient.getBatchCallable().futureCall(request);
+   *   // Do something.
+   *   Batch response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getBatchCallable() { + return stub.getBatchCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists batch workloads. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]");
+   *   for (Batch element : batchControllerClient.listBatches(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The parent, which owns this collection of batches. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBatchesPagedResponse listBatches(LocationName parent) { + ListBatchesRequest request = + ListBatchesRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listBatches(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists batch workloads. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   String parent = LocationName.of("[PROJECT]", "[LOCATION]").toString();
+   *   for (Batch element : batchControllerClient.listBatches(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The parent, which owns this collection of batches. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBatchesPagedResponse listBatches(String parent) { + ListBatchesRequest request = ListBatchesRequest.newBuilder().setParent(parent).build(); + return listBatches(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists batch workloads. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   ListBatchesRequest request =
+   *       ListBatchesRequest.newBuilder()
+   *           .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (Batch element : batchControllerClient.listBatches(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBatchesPagedResponse listBatches(ListBatchesRequest request) { + return listBatchesPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists batch workloads. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   ListBatchesRequest request =
+   *       ListBatchesRequest.newBuilder()
+   *           .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture<ListBatchesPagedResponse> future =
+   *       batchControllerClient.listBatchesPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Batch element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listBatchesPagedCallable() { + return stub.listBatchesPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists batch workloads. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   ListBatchesRequest request =
+   *       ListBatchesRequest.newBuilder()
+   *           .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListBatchesResponse response = batchControllerClient.listBatchesCallable().call(request);
+   *     for (Batch element : response.getBatchesList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable listBatchesCallable() { + return stub.listBatchesCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes the batch workload resource. If the batch is not in terminal state, the delete fails + * and the response returns `FAILED_PRECONDITION`. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   BatchName name = BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]");
+   *   batchControllerClient.deleteBatch(name);
+   * }
+   * }
+ * + * @param name Required. The name of the batch resource to delete. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBatch(BatchName name) { + DeleteBatchRequest request = + DeleteBatchRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + deleteBatch(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes the batch workload resource. If the batch is not in terminal state, the delete fails + * and the response returns `FAILED_PRECONDITION`. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   String name = BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]").toString();
+   *   batchControllerClient.deleteBatch(name);
+   * }
+   * }
+ * + * @param name Required. The name of the batch resource to delete. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBatch(String name) { + DeleteBatchRequest request = DeleteBatchRequest.newBuilder().setName(name).build(); + deleteBatch(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes the batch workload resource. If the batch is not in terminal state, the delete fails + * and the response returns `FAILED_PRECONDITION`. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   DeleteBatchRequest request =
+   *       DeleteBatchRequest.newBuilder()
+   *           .setName(BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]").toString())
+   *           .build();
+   *   batchControllerClient.deleteBatch(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBatch(DeleteBatchRequest request) { + deleteBatchCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes the batch workload resource. If the batch is not in terminal state, the delete fails + * and the response returns `FAILED_PRECONDITION`. + * + *

Sample code: + * + *

{@code
+   * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+   *   DeleteBatchRequest request =
+   *       DeleteBatchRequest.newBuilder()
+   *           .setName(BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]").toString())
+   *           .build();
+   *   ApiFuture<Empty> future = batchControllerClient.deleteBatchCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable deleteBatchCallable() { + return stub.deleteBatchCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } + + public static class ListBatchesPagedResponse + extends AbstractPagedListResponse< + ListBatchesRequest, + ListBatchesResponse, + Batch, + ListBatchesPage, + ListBatchesFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListBatchesPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, input -> new ListBatchesPagedResponse(input), MoreExecutors.directExecutor()); + } + + private ListBatchesPagedResponse(ListBatchesPage page) { + super(page, ListBatchesFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListBatchesPage + extends AbstractPage { + + private ListBatchesPage( + PageContext context, + ListBatchesResponse response) { + super(context, response); + } + + private static ListBatchesPage createEmptyPage() { + return new ListBatchesPage(null, null); + } + + @Override + protected ListBatchesPage createPage( + PageContext context, + ListBatchesResponse response) { + return new ListBatchesPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListBatchesFixedSizeCollection + extends AbstractFixedSizeCollection< + ListBatchesRequest, + ListBatchesResponse, + Batch, + ListBatchesPage, + ListBatchesFixedSizeCollection> { + + private ListBatchesFixedSizeCollection(List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListBatchesFixedSizeCollection createEmptyCollection() { + return new ListBatchesFixedSizeCollection(null, 0); + } + + @Override + protected ListBatchesFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListBatchesFixedSizeCollection(pages, collectionSize); + } + } +} diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/BatchControllerSettings.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/BatchControllerSettings.java new file mode 100644 index 00000000..0aceb61d --- /dev/null +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/BatchControllerSettings.java @@ -0,0 +1,232 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.dataproc.v1; + +import static com.google.cloud.dataproc.v1.BatchControllerClient.ListBatchesPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.dataproc.v1.stub.BatchControllerStubSettings; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link BatchControllerClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (dataproc.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the total timeout of getBatch to 30 seconds: + * + *

{@code
+ * BatchControllerSettings.Builder batchControllerSettingsBuilder =
+ *     BatchControllerSettings.newBuilder();
+ * batchControllerSettingsBuilder
+ *     .getBatchSettings()
+ *     .setRetrySettings(
+ *         batchControllerSettingsBuilder
+ *             .getBatchSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setTotalTimeout(Duration.ofSeconds(30))
+ *             .build());
+ * BatchControllerSettings batchControllerSettings = batchControllerSettingsBuilder.build();
+ * }
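+ *
+ * To apply one change across every unary method at once, the builder also exposes
+ * applyToAllUnaryMethods. A hedged sketch (disabling retries here is purely illustrative):
+ *
+ * {@code
+ * BatchControllerSettings.Builder builder = BatchControllerSettings.newBuilder();
+ * builder.applyToAllUnaryMethods(
+ *     callSettings -> {
+ *       // Illustrative only: clear the retryable codes for every unary RPC.
+ *       callSettings.setRetryableCodes();
+ *       return null;
+ *     });
+ * BatchControllerSettings batchControllerSettings = builder.build();
+ * }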
+ */ +@Generated("by gapic-generator-java") +public class BatchControllerSettings extends ClientSettings { + + /** Returns the object with the settings used for calls to createBatch. */ + public UnaryCallSettings createBatchSettings() { + return ((BatchControllerStubSettings) getStubSettings()).createBatchSettings(); + } + + /** Returns the object with the settings used for calls to createBatch. */ + public OperationCallSettings + createBatchOperationSettings() { + return ((BatchControllerStubSettings) getStubSettings()).createBatchOperationSettings(); + } + + /** Returns the object with the settings used for calls to getBatch. */ + public UnaryCallSettings getBatchSettings() { + return ((BatchControllerStubSettings) getStubSettings()).getBatchSettings(); + } + + /** Returns the object with the settings used for calls to listBatches. */ + public PagedCallSettings + listBatchesSettings() { + return ((BatchControllerStubSettings) getStubSettings()).listBatchesSettings(); + } + + /** Returns the object with the settings used for calls to deleteBatch. */ + public UnaryCallSettings deleteBatchSettings() { + return ((BatchControllerStubSettings) getStubSettings()).deleteBatchSettings(); + } + + public static final BatchControllerSettings create(BatchControllerStubSettings stub) + throws IOException { + return new BatchControllerSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return BatchControllerStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return BatchControllerStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return BatchControllerStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return BatchControllerStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return BatchControllerStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return BatchControllerStubSettings.defaultTransportChannelProvider(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return BatchControllerStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BatchControllerSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for BatchControllerSettings. 
*/ + public static class Builder extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(BatchControllerStubSettings.newBuilder(clientContext)); + } + + protected Builder(BatchControllerSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(BatchControllerStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(BatchControllerStubSettings.newBuilder()); + } + + public BatchControllerStubSettings.Builder getStubSettingsBuilder() { + return ((BatchControllerStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to createBatch. */ + public UnaryCallSettings.Builder createBatchSettings() { + return getStubSettingsBuilder().createBatchSettings(); + } + + /** Returns the builder for the settings used for calls to createBatch. */ + public OperationCallSettings.Builder + createBatchOperationSettings() { + return getStubSettingsBuilder().createBatchOperationSettings(); + } + + /** Returns the builder for the settings used for calls to getBatch. */ + public UnaryCallSettings.Builder getBatchSettings() { + return getStubSettingsBuilder().getBatchSettings(); + } + + /** Returns the builder for the settings used for calls to listBatches. */ + public PagedCallSettings.Builder< + ListBatchesRequest, ListBatchesResponse, ListBatchesPagedResponse> + listBatchesSettings() { + return getStubSettingsBuilder().listBatchesSettings(); + } + + /** Returns the builder for the settings used for calls to deleteBatch. */ + public UnaryCallSettings.Builder deleteBatchSettings() { + return getStubSettingsBuilder().deleteBatchSettings(); + } + + @Override + public BatchControllerSettings build() throws IOException { + return new BatchControllerSettings(this); + } + } +} diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/gapic_metadata.json b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/gapic_metadata.json index 1b98e6d0..26e0eeb5 100644 --- a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/gapic_metadata.json +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/gapic_metadata.json @@ -29,6 +29,27 @@ } } }, + "BatchController": { + "clients": { + "grpc": { + "libraryClient": "BatchControllerClient", + "rpcs": { + "CreateBatch": { + "methods": ["createBatchAsync", "createBatchAsync", "createBatchAsync", "createBatchOperationCallable", "createBatchCallable"] + }, + "DeleteBatch": { + "methods": ["deleteBatch", "deleteBatch", "deleteBatch", "deleteBatchCallable"] + }, + "GetBatch": { + "methods": ["getBatch", "getBatch", "getBatch", "getBatchCallable"] + }, + "ListBatches": { + "methods": ["listBatches", "listBatches", "listBatches", "listBatchesPagedCallable", "listBatchesCallable"] + } + } + } + } + }, "ClusterController": { "clients": { "grpc": { diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/package-info.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/package-info.java index 246d9b5f..ff77c4df 100644 --- a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/package-info.java +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/package-info.java @@ -33,6 +33,19 @@ * } * } * + *

======================= BatchControllerClient ======================= + * + *

Service Description: The BatchController provides methods to manage batch workloads. + * + *

Sample for BatchControllerClient: + * + *

{@code
+ * try (BatchControllerClient batchControllerClient = BatchControllerClient.create()) {
+ *   BatchName name = BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]");
+ *   Batch response = batchControllerClient.getBatch(name);
+ * }
+ * }
+ * *

======================= ClusterControllerClient ======================= * *

Service Description: The ClusterControllerService provides methods to manage clusters of diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/BatchControllerStub.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/BatchControllerStub.java new file mode 100644 index 00000000..3c8721b8 --- /dev/null +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/BatchControllerStub.java @@ -0,0 +1,76 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.dataproc.v1.stub; + +import static com.google.cloud.dataproc.v1.BatchControllerClient.ListBatchesPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1.Batch; +import com.google.cloud.dataproc.v1.BatchOperationMetadata; +import com.google.cloud.dataproc.v1.CreateBatchRequest; +import com.google.cloud.dataproc.v1.DeleteBatchRequest; +import com.google.cloud.dataproc.v1.GetBatchRequest; +import com.google.cloud.dataproc.v1.ListBatchesRequest; +import com.google.cloud.dataproc.v1.ListBatchesResponse; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import com.google.protobuf.Empty; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the BatchController service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public abstract class BatchControllerStub implements BackgroundResource { + + public OperationsStub getOperationsStub() { + throw new UnsupportedOperationException("Not implemented: getOperationsStub()"); + } + + public OperationCallable + createBatchOperationCallable() { + throw new UnsupportedOperationException("Not implemented: createBatchOperationCallable()"); + } + + public UnaryCallable createBatchCallable() { + throw new UnsupportedOperationException("Not implemented: createBatchCallable()"); + } + + public UnaryCallable getBatchCallable() { + throw new UnsupportedOperationException("Not implemented: getBatchCallable()"); + } + + public UnaryCallable listBatchesPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listBatchesPagedCallable()"); + } + + public UnaryCallable listBatchesCallable() { + throw new UnsupportedOperationException("Not implemented: listBatchesCallable()"); + } + + public UnaryCallable deleteBatchCallable() { + throw new UnsupportedOperationException("Not implemented: deleteBatchCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/BatchControllerStubSettings.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/BatchControllerStubSettings.java new file mode 100644 index 00000000..793163de --- /dev/null +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/BatchControllerStubSettings.java @@ -0,0 +1,448 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.dataproc.v1.stub; + +import static com.google.cloud.dataproc.v1.BatchControllerClient.ListBatchesPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.grpc.ProtoOperationTransformers; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.PagedListDescriptor; +import com.google.api.gax.rpc.PagedListResponseFactory; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1.Batch; +import com.google.cloud.dataproc.v1.BatchOperationMetadata; +import com.google.cloud.dataproc.v1.CreateBatchRequest; +import com.google.cloud.dataproc.v1.DeleteBatchRequest; +import com.google.cloud.dataproc.v1.GetBatchRequest; +import com.google.cloud.dataproc.v1.ListBatchesRequest; +import com.google.cloud.dataproc.v1.ListBatchesResponse; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; +import org.threeten.bp.Duration; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link BatchControllerStub}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (dataproc.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the total timeout of getBatch to 30 seconds: + * + *

{@code
+ * BatchControllerStubSettings.Builder batchControllerSettingsBuilder =
+ *     BatchControllerStubSettings.newBuilder();
+ * batchControllerSettingsBuilder
+ *     .getBatchSettings()
+ *     .setRetrySettings(
+ *         batchControllerSettingsBuilder
+ *             .getBatchSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setTotalTimeout(Duration.ofSeconds(30))
+ *             .build());
+ * BatchControllerStubSettings batchControllerSettings = batchControllerSettingsBuilder.build();
+ * }
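+ *
+ * The long-running createBatch RPC is tuned separately through createBatchOperationSettings().
+ * A hedged sketch of stretching its polling schedule (all durations are illustrative):
+ *
+ * {@code
+ * BatchControllerStubSettings.Builder stubBuilder = BatchControllerStubSettings.newBuilder();
+ * stubBuilder
+ *     .createBatchOperationSettings()
+ *     .setPollingAlgorithm(
+ *         OperationTimedPollAlgorithm.create(
+ *             RetrySettings.newBuilder()
+ *                 .setInitialRetryDelay(Duration.ofSeconds(5))
+ *                 .setRetryDelayMultiplier(1.5)
+ *                 .setMaxRetryDelay(Duration.ofSeconds(45))
+ *                 .setTotalTimeout(Duration.ofMinutes(10))
+ *                 .build()));
+ * BatchControllerStubSettings stubSettings = stubBuilder.build();
+ * }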
+ */ +@Generated("by gapic-generator-java") +public class BatchControllerStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder().add("https://www.googleapis.com/auth/cloud-platform").build(); + + private final UnaryCallSettings createBatchSettings; + private final OperationCallSettings + createBatchOperationSettings; + private final UnaryCallSettings getBatchSettings; + private final PagedCallSettings + listBatchesSettings; + private final UnaryCallSettings deleteBatchSettings; + + private static final PagedListDescriptor + LIST_BATCHES_PAGE_STR_DESC = + new PagedListDescriptor() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListBatchesRequest injectToken(ListBatchesRequest payload, String token) { + return ListBatchesRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListBatchesRequest injectPageSize(ListBatchesRequest payload, int pageSize) { + return ListBatchesRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListBatchesRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListBatchesResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListBatchesResponse payload) { + return payload.getBatchesList() == null + ? ImmutableList.of() + : payload.getBatchesList(); + } + }; + + private static final PagedListResponseFactory< + ListBatchesRequest, ListBatchesResponse, ListBatchesPagedResponse> + LIST_BATCHES_PAGE_STR_FACT = + new PagedListResponseFactory< + ListBatchesRequest, ListBatchesResponse, ListBatchesPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListBatchesRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext pageContext = + PageContext.create(callable, LIST_BATCHES_PAGE_STR_DESC, request, context); + return ListBatchesPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + /** Returns the object with the settings used for calls to createBatch. */ + public UnaryCallSettings createBatchSettings() { + return createBatchSettings; + } + + /** Returns the object with the settings used for calls to createBatch. */ + public OperationCallSettings + createBatchOperationSettings() { + return createBatchOperationSettings; + } + + /** Returns the object with the settings used for calls to getBatch. */ + public UnaryCallSettings getBatchSettings() { + return getBatchSettings; + } + + /** Returns the object with the settings used for calls to listBatches. */ + public PagedCallSettings + listBatchesSettings() { + return listBatchesSettings; + } + + /** Returns the object with the settings used for calls to deleteBatch. */ + public UnaryCallSettings deleteBatchSettings() { + return deleteBatchSettings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public BatchControllerStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcBatchControllerStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns a builder for the default ExecutorProvider for this service. 
*/ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return "dataproc.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "dataproc.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(BatchControllerStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BatchControllerStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createBatchSettings = settingsBuilder.createBatchSettings().build(); + createBatchOperationSettings = settingsBuilder.createBatchOperationSettings().build(); + getBatchSettings = settingsBuilder.getBatchSettings().build(); + listBatchesSettings = settingsBuilder.listBatchesSettings().build(); + deleteBatchSettings = settingsBuilder.deleteBatchSettings().build(); + } + + /** Builder for BatchControllerStubSettings. 
*/ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final UnaryCallSettings.Builder createBatchSettings; + private final OperationCallSettings.Builder + createBatchOperationSettings; + private final UnaryCallSettings.Builder getBatchSettings; + private final PagedCallSettings.Builder< + ListBatchesRequest, ListBatchesResponse, ListBatchesPagedResponse> + listBatchesSettings; + private final UnaryCallSettings.Builder deleteBatchSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put("no_retry_codes", ImmutableSet.copyOf(Lists.newArrayList())); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = RetrySettings.newBuilder().setRpcTimeoutMultiplier(1.0).build(); + definitions.put("no_retry_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createBatchSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createBatchOperationSettings = OperationCallSettings.newBuilder(); + getBatchSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listBatchesSettings = PagedCallSettings.newBuilder(LIST_BATCHES_PAGE_STR_FACT); + deleteBatchSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createBatchSettings, getBatchSettings, listBatchesSettings, deleteBatchSettings); + initDefaults(this); + } + + protected Builder(BatchControllerStubSettings settings) { + super(settings); + + createBatchSettings = settings.createBatchSettings.toBuilder(); + createBatchOperationSettings = settings.createBatchOperationSettings.toBuilder(); + getBatchSettings = settings.getBatchSettings.toBuilder(); + listBatchesSettings = settings.listBatchesSettings.toBuilder(); + deleteBatchSettings = settings.deleteBatchSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createBatchSettings, getBatchSettings, listBatchesSettings, deleteBatchSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setEndpoint(getDefaultEndpoint()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .createBatchSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .getBatchSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .listBatchesSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .deleteBatchSettings() + 
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .createBatchOperationSettings() + .setInitialCallSettings( + UnaryCallSettings.newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Batch.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(BatchOperationMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelay(Duration.ofMillis(45000L)) + .setInitialRpcTimeout(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ZERO) + .setTotalTimeout(Duration.ofMillis(300000L)) + .build())); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *
<p>
Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createBatch. */ + public UnaryCallSettings.Builder createBatchSettings() { + return createBatchSettings; + } + + /** Returns the builder for the settings used for calls to createBatch. */ + @BetaApi( + "The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallSettings.Builder + createBatchOperationSettings() { + return createBatchOperationSettings; + } + + /** Returns the builder for the settings used for calls to getBatch. */ + public UnaryCallSettings.Builder getBatchSettings() { + return getBatchSettings; + } + + /** Returns the builder for the settings used for calls to listBatches. */ + public PagedCallSettings.Builder< + ListBatchesRequest, ListBatchesResponse, ListBatchesPagedResponse> + listBatchesSettings() { + return listBatchesSettings; + } + + /** Returns the builder for the settings used for calls to deleteBatch. */ + public UnaryCallSettings.Builder deleteBatchSettings() { + return deleteBatchSettings; + } + + @Override + public BatchControllerStubSettings build() throws IOException { + return new BatchControllerStubSettings(this); + } + } +} diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/GrpcBatchControllerCallableFactory.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/GrpcBatchControllerCallableFactory.java new file mode 100644 index 00000000..0a5ca6af --- /dev/null +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/GrpcBatchControllerCallableFactory.java @@ -0,0 +1,113 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.dataproc.v1.stub; + +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the BatchController service API. + * + *
<p>
This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class GrpcBatchControllerCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/GrpcBatchControllerStub.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/GrpcBatchControllerStub.java new file mode 100644 index 00000000..c7b481b6 --- /dev/null +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/stub/GrpcBatchControllerStub.java @@ -0,0 +1,277 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.dataproc.v1.stub; + +import static com.google.cloud.dataproc.v1.BatchControllerClient.ListBatchesPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1.Batch; +import com.google.cloud.dataproc.v1.BatchOperationMetadata; +import com.google.cloud.dataproc.v1.CreateBatchRequest; +import com.google.cloud.dataproc.v1.DeleteBatchRequest; +import com.google.cloud.dataproc.v1.GetBatchRequest; +import com.google.cloud.dataproc.v1.ListBatchesRequest; +import com.google.cloud.dataproc.v1.ListBatchesResponse; +import com.google.common.collect.ImmutableMap; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.protobuf.Empty; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the BatchController service API. + * + *
<p>
This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class GrpcBatchControllerStub extends BatchControllerStub { + private static final MethodDescriptor createBatchMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1.BatchController/CreateBatch") + .setRequestMarshaller(ProtoUtils.marshaller(CreateBatchRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .build(); + + private static final MethodDescriptor getBatchMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1.BatchController/GetBatch") + .setRequestMarshaller(ProtoUtils.marshaller(GetBatchRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Batch.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + listBatchesMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1.BatchController/ListBatches") + .setRequestMarshaller(ProtoUtils.marshaller(ListBatchesRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListBatchesResponse.getDefaultInstance())) + .build(); + + private static final MethodDescriptor deleteBatchMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1.BatchController/DeleteBatch") + .setRequestMarshaller(ProtoUtils.marshaller(DeleteBatchRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .build(); + + private final UnaryCallable createBatchCallable; + private final OperationCallable + createBatchOperationCallable; + private final UnaryCallable getBatchCallable; + private final UnaryCallable listBatchesCallable; + private final UnaryCallable + listBatchesPagedCallable; + private final UnaryCallable deleteBatchCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcBatchControllerStub create(BatchControllerStubSettings settings) + throws IOException { + return new GrpcBatchControllerStub(settings, ClientContext.create(settings)); + } + + public static final GrpcBatchControllerStub create(ClientContext clientContext) + throws IOException { + return new GrpcBatchControllerStub( + BatchControllerStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcBatchControllerStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcBatchControllerStub( + BatchControllerStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcBatchControllerStub, using the given settings. This is protected + * so that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBatchControllerStub( + BatchControllerStubSettings settings, ClientContext clientContext) throws IOException { + this(settings, clientContext, new GrpcBatchControllerCallableFactory()); + } + + /** + * Constructs an instance of GrpcBatchControllerStub, using the given settings. 
This is protected + * so that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBatchControllerStub( + BatchControllerStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings createBatchTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createBatchMethodDescriptor) + .setParamsExtractor( + request -> { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("parent", String.valueOf(request.getParent())); + return params.build(); + }) + .build(); + GrpcCallSettings getBatchTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getBatchMethodDescriptor) + .setParamsExtractor( + request -> { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("name", String.valueOf(request.getName())); + return params.build(); + }) + .build(); + GrpcCallSettings listBatchesTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listBatchesMethodDescriptor) + .setParamsExtractor( + request -> { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("parent", String.valueOf(request.getParent())); + return params.build(); + }) + .build(); + GrpcCallSettings deleteBatchTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteBatchMethodDescriptor) + .setParamsExtractor( + request -> { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("name", String.valueOf(request.getName())); + return params.build(); + }) + .build(); + + this.createBatchCallable = + callableFactory.createUnaryCallable( + createBatchTransportSettings, settings.createBatchSettings(), clientContext); + this.createBatchOperationCallable = + callableFactory.createOperationCallable( + createBatchTransportSettings, + settings.createBatchOperationSettings(), + clientContext, + operationsStub); + this.getBatchCallable = + callableFactory.createUnaryCallable( + getBatchTransportSettings, settings.getBatchSettings(), clientContext); + this.listBatchesCallable = + callableFactory.createUnaryCallable( + listBatchesTransportSettings, settings.listBatchesSettings(), clientContext); + this.listBatchesPagedCallable = + callableFactory.createPagedCallable( + listBatchesTransportSettings, settings.listBatchesSettings(), clientContext); + this.deleteBatchCallable = + callableFactory.createUnaryCallable( + deleteBatchTransportSettings, settings.deleteBatchSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable createBatchCallable() { + return createBatchCallable; + } + + @Override + public OperationCallable + createBatchOperationCallable() { + return createBatchOperationCallable; + } + + @Override + public UnaryCallable getBatchCallable() { + return getBatchCallable; + } + + @Override + public UnaryCallable listBatchesCallable() { + return listBatchesCallable; + } + + @Override + public UnaryCallable listBatchesPagedCallable() { + return listBatchesPagedCallable; + } + + @Override + public UnaryCallable deleteBatchCallable() { + return deleteBatchCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } 
catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/BatchControllerClientTest.java b/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/BatchControllerClientTest.java new file mode 100644 index 00000000..e8825b6f --- /dev/null +++ b/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/BatchControllerClientTest.java @@ -0,0 +1,468 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.dataproc.v1; + +import static com.google.cloud.dataproc.v1.BatchControllerClient.ListBatchesPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.common.collect.Lists; +import com.google.longrunning.Operation; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.protobuf.Timestamp; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class BatchControllerClientTest { + private static MockBatchController mockBatchController; + private static MockServiceHelper mockServiceHelper; + private LocalChannelProvider channelProvider; + private BatchControllerClient client; + + @BeforeClass + public static void startStaticServer() { + mockBatchController = new MockBatchController(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockBatchController)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + 
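+ // The tests below run the generated client against an in-process server: MockServiceHelper
+ // registers MockBatchController and hands out a LocalChannelProvider, so no network
+ // traffic or real credentials are involved.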
@Before + public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); + BatchControllerSettings settings = + BatchControllerSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = BatchControllerClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void createBatchTest() throws Exception { + Batch expectedResponse = + Batch.newBuilder() + .setName(BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]").toString()) + .setUuid("uuid3601339") + .setCreateTime(Timestamp.newBuilder().build()) + .setRuntimeInfo(RuntimeInfo.newBuilder().build()) + .setStateMessage("stateMessage1128185398") + .setStateTime(Timestamp.newBuilder().build()) + .setCreator("creator1028554796") + .putAllLabels(new HashMap()) + .setRuntimeConfig(RuntimeConfig.newBuilder().build()) + .setEnvironmentConfig(EnvironmentConfig.newBuilder().build()) + .setOperation("operation1662702951") + .addAllStateHistory(new ArrayList()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createBatchTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockBatchController.addResponse(resultOperation); + + LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]"); + Batch batch = Batch.newBuilder().build(); + String batchId = "batchId-331744779"; + + Batch actualResponse = client.createBatchAsync(parent, batch, batchId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBatchController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateBatchRequest actualRequest = ((CreateBatchRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(batch, actualRequest.getBatch()); + Assert.assertEquals(batchId, actualRequest.getBatchId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createBatchExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBatchController.addException(exception); + + try { + LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]"); + Batch batch = Batch.newBuilder().build(); + String batchId = "batchId-331744779"; + client.createBatchAsync(parent, batch, batchId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void createBatchTest2() throws Exception { + Batch expectedResponse = + Batch.newBuilder() + .setName(BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]").toString()) + .setUuid("uuid3601339") + .setCreateTime(Timestamp.newBuilder().build()) + .setRuntimeInfo(RuntimeInfo.newBuilder().build()) + .setStateMessage("stateMessage1128185398") + .setStateTime(Timestamp.newBuilder().build()) + .setCreator("creator1028554796") + .putAllLabels(new HashMap()) + .setRuntimeConfig(RuntimeConfig.newBuilder().build()) + 
.setEnvironmentConfig(EnvironmentConfig.newBuilder().build()) + .setOperation("operation1662702951") + .addAllStateHistory(new ArrayList()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createBatchTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockBatchController.addResponse(resultOperation); + + String parent = "parent-995424086"; + Batch batch = Batch.newBuilder().build(); + String batchId = "batchId-331744779"; + + Batch actualResponse = client.createBatchAsync(parent, batch, batchId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBatchController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateBatchRequest actualRequest = ((CreateBatchRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(batch, actualRequest.getBatch()); + Assert.assertEquals(batchId, actualRequest.getBatchId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createBatchExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBatchController.addException(exception); + + try { + String parent = "parent-995424086"; + Batch batch = Batch.newBuilder().build(); + String batchId = "batchId-331744779"; + client.createBatchAsync(parent, batch, batchId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void getBatchTest() throws Exception { + Batch expectedResponse = + Batch.newBuilder() + .setName(BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]").toString()) + .setUuid("uuid3601339") + .setCreateTime(Timestamp.newBuilder().build()) + .setRuntimeInfo(RuntimeInfo.newBuilder().build()) + .setStateMessage("stateMessage1128185398") + .setStateTime(Timestamp.newBuilder().build()) + .setCreator("creator1028554796") + .putAllLabels(new HashMap()) + .setRuntimeConfig(RuntimeConfig.newBuilder().build()) + .setEnvironmentConfig(EnvironmentConfig.newBuilder().build()) + .setOperation("operation1662702951") + .addAllStateHistory(new ArrayList()) + .build(); + mockBatchController.addResponse(expectedResponse); + + BatchName name = BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]"); + + Batch actualResponse = client.getBatch(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBatchController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetBatchRequest actualRequest = ((GetBatchRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getBatchExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBatchController.addException(exception); + + try { + BatchName name = BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]"); + 
client.getBatch(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getBatchTest2() throws Exception { + Batch expectedResponse = + Batch.newBuilder() + .setName(BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]").toString()) + .setUuid("uuid3601339") + .setCreateTime(Timestamp.newBuilder().build()) + .setRuntimeInfo(RuntimeInfo.newBuilder().build()) + .setStateMessage("stateMessage1128185398") + .setStateTime(Timestamp.newBuilder().build()) + .setCreator("creator1028554796") + .putAllLabels(new HashMap()) + .setRuntimeConfig(RuntimeConfig.newBuilder().build()) + .setEnvironmentConfig(EnvironmentConfig.newBuilder().build()) + .setOperation("operation1662702951") + .addAllStateHistory(new ArrayList()) + .build(); + mockBatchController.addResponse(expectedResponse); + + String name = "name3373707"; + + Batch actualResponse = client.getBatch(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBatchController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetBatchRequest actualRequest = ((GetBatchRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getBatchExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBatchController.addException(exception); + + try { + String name = "name3373707"; + client.getBatch(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void listBatchesTest() throws Exception { + Batch responsesElement = Batch.newBuilder().build(); + ListBatchesResponse expectedResponse = + ListBatchesResponse.newBuilder() + .setNextPageToken("") + .addAllBatches(Arrays.asList(responsesElement)) + .build(); + mockBatchController.addResponse(expectedResponse); + + LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]"); + + ListBatchesPagedResponse pagedListResponse = client.listBatches(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBatchesList().get(0), resources.get(0)); + + List actualRequests = mockBatchController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListBatchesRequest actualRequest = ((ListBatchesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listBatchesExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBatchController.addException(exception); + + try { + LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]"); + client.listBatches(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBatchesTest2() throws Exception { + Batch responsesElement = Batch.newBuilder().build(); + ListBatchesResponse expectedResponse = + ListBatchesResponse.newBuilder() + .setNextPageToken("") + .addAllBatches(Arrays.asList(responsesElement)) + .build(); + mockBatchController.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListBatchesPagedResponse pagedListResponse = client.listBatches(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBatchesList().get(0), resources.get(0)); + + List actualRequests = mockBatchController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListBatchesRequest actualRequest = ((ListBatchesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listBatchesExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBatchController.addException(exception); + + try { + String parent = "parent-995424086"; + client.listBatches(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteBatchTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockBatchController.addResponse(expectedResponse); + + BatchName name = BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]"); + + client.deleteBatch(name); + + List actualRequests = mockBatchController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteBatchRequest actualRequest = ((DeleteBatchRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteBatchExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBatchController.addException(exception); + + try { + BatchName name = BatchName.of("[PROJECT]", "[LOCATION]", "[BATCH]"); + client.deleteBatch(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteBatchTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockBatchController.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteBatch(name); + + List actualRequests = mockBatchController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteBatchRequest actualRequest = ((DeleteBatchRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteBatchExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBatchController.addException(exception); + + try { + String name = "name3373707"; + client.deleteBatch(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } +} diff --git a/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/MockBatchController.java b/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/MockBatchController.java new file mode 100644 index 00000000..0d35efbd --- /dev/null +++ b/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/MockBatchController.java @@ -0,0 +1,59 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.dataproc.v1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockBatchController implements MockGrpcService { + private final MockBatchControllerImpl serviceImpl; + + public MockBatchController() { + serviceImpl = new MockBatchControllerImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/MockBatchControllerImpl.java b/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/MockBatchControllerImpl.java new file mode 100644 index 00000000..ae3b2c9d --- /dev/null +++ b/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1/MockBatchControllerImpl.java @@ -0,0 +1,143 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.dataproc.v1; + +import com.google.api.core.BetaApi; +import com.google.cloud.dataproc.v1.BatchControllerGrpc.BatchControllerImplBase; +import com.google.longrunning.Operation; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Empty; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockBatchControllerImpl extends BatchControllerImplBase { + private List requests; + private Queue responses; + + public MockBatchControllerImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void createBatch(CreateBatchRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateBatch, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getBatch(GetBatchRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Batch) { + requests.add(request); + responseObserver.onNext(((Batch) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetBatch, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Batch.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listBatches( + ListBatchesRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListBatchesResponse) { + requests.add(request); + responseObserver.onNext(((ListBatchesResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListBatches, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ListBatchesResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteBatch(DeleteBatchRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteBatch, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } +} diff --git a/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchControllerGrpc.java b/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchControllerGrpc.java new file mode 100644 index 00000000..a7d29c06 --- /dev/null +++ b/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchControllerGrpc.java @@ -0,0 +1,693 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
<pre>
+ * The BatchController provides methods to manage batch workloads.
+ * </pre>
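+ *
+ * <p>A hypothetical wiring sketch (credentials setup is elided and the resource name is
+ * illustrative, not part of this patch): create a channel, wrap it in a stub, and call an RPC.
+ *
+ * <pre>{@code
+ * ManagedChannel channel =
+ *     ManagedChannelBuilder.forTarget("dataproc.googleapis.com:443").build();
+ * BatchControllerGrpc.BatchControllerBlockingStub stub =
+ *     BatchControllerGrpc.newBlockingStub(channel);
+ * Batch batch = stub.getBatch(GetBatchRequest.newBuilder().setName(batchName).build());
+ * channel.shutdown();
+ * }</pre>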
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler", + comments = "Source: google/cloud/dataproc/v1/batches.proto") +public final class BatchControllerGrpc { + + private BatchControllerGrpc() {} + + public static final String SERVICE_NAME = "google.cloud.dataproc.v1.BatchController"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.CreateBatchRequest, com.google.longrunning.Operation> + getCreateBatchMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateBatch", + requestType = com.google.cloud.dataproc.v1.CreateBatchRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.CreateBatchRequest, com.google.longrunning.Operation> + getCreateBatchMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.CreateBatchRequest, com.google.longrunning.Operation> + getCreateBatchMethod; + if ((getCreateBatchMethod = BatchControllerGrpc.getCreateBatchMethod) == null) { + synchronized (BatchControllerGrpc.class) { + if ((getCreateBatchMethod = BatchControllerGrpc.getCreateBatchMethod) == null) { + BatchControllerGrpc.getCreateBatchMethod = + getCreateBatchMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateBatch")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1.CreateBatchRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new BatchControllerMethodDescriptorSupplier("CreateBatch")) + .build(); + } + } + } + return getCreateBatchMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.GetBatchRequest, com.google.cloud.dataproc.v1.Batch> + getGetBatchMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetBatch", + requestType = com.google.cloud.dataproc.v1.GetBatchRequest.class, + responseType = com.google.cloud.dataproc.v1.Batch.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.GetBatchRequest, com.google.cloud.dataproc.v1.Batch> + getGetBatchMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.GetBatchRequest, com.google.cloud.dataproc.v1.Batch> + getGetBatchMethod; + if ((getGetBatchMethod = BatchControllerGrpc.getGetBatchMethod) == null) { + synchronized (BatchControllerGrpc.class) { + if ((getGetBatchMethod = BatchControllerGrpc.getGetBatchMethod) == null) { + BatchControllerGrpc.getGetBatchMethod = + getGetBatchMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetBatch")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1.GetBatchRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1.Batch.getDefaultInstance())) + .setSchemaDescriptor(new BatchControllerMethodDescriptorSupplier("GetBatch")) + .build(); + } + } + } + return getGetBatchMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.ListBatchesRequest, + com.google.cloud.dataproc.v1.ListBatchesResponse> + getListBatchesMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListBatches", + requestType = com.google.cloud.dataproc.v1.ListBatchesRequest.class, + responseType = com.google.cloud.dataproc.v1.ListBatchesResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.ListBatchesRequest, + com.google.cloud.dataproc.v1.ListBatchesResponse> + getListBatchesMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.ListBatchesRequest, + com.google.cloud.dataproc.v1.ListBatchesResponse> + getListBatchesMethod; + if ((getListBatchesMethod = BatchControllerGrpc.getListBatchesMethod) == null) { + synchronized (BatchControllerGrpc.class) { + if ((getListBatchesMethod = BatchControllerGrpc.getListBatchesMethod) == null) { + BatchControllerGrpc.getListBatchesMethod = + getListBatchesMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListBatches")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1.ListBatchesRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1.ListBatchesResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new BatchControllerMethodDescriptorSupplier("ListBatches")) + .build(); + } + } + } + return getListBatchesMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.DeleteBatchRequest, com.google.protobuf.Empty> + getDeleteBatchMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteBatch", + requestType = com.google.cloud.dataproc.v1.DeleteBatchRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.DeleteBatchRequest, com.google.protobuf.Empty> + getDeleteBatchMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.dataproc.v1.DeleteBatchRequest, com.google.protobuf.Empty> + getDeleteBatchMethod; + if ((getDeleteBatchMethod = BatchControllerGrpc.getDeleteBatchMethod) == null) { + synchronized (BatchControllerGrpc.class) { + if ((getDeleteBatchMethod = BatchControllerGrpc.getDeleteBatchMethod) == null) { + BatchControllerGrpc.getDeleteBatchMethod = + getDeleteBatchMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "DeleteBatch")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1.DeleteBatchRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new BatchControllerMethodDescriptorSupplier("DeleteBatch")) + .build(); + } + } + } + return getDeleteBatchMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static BatchControllerStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BatchControllerStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BatchControllerStub(channel, callOptions); + } + }; + return BatchControllerStub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static BatchControllerBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BatchControllerBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BatchControllerBlockingStub(channel, callOptions); + } + }; + return BatchControllerBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static BatchControllerFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BatchControllerFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BatchControllerFutureStub(channel, callOptions); + } + }; + return BatchControllerFutureStub.newStub(factory, channel); + } + + /** + * + * + *
<pre>
+   * The BatchController provides methods to manage batch workloads.
+   * </pre>
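+   *
+   * <p>Servers (or test fakes such as MockBatchControllerImpl in this patch) extend this
+   * base class and override individual RPC methods; any method left unimplemented fails
+   * with UNIMPLEMENTED by default.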
+ */ + public abstract static class BatchControllerImplBase implements io.grpc.BindableService { + + /** + * + * + *
<pre>
+     * Creates a batch workload that executes asynchronously.
+     * </pre>
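+     *
+     * <p>The returned google.longrunning.Operation resolves to a Batch response with
+     * BatchOperationMetadata metadata (see the operation transformers wired up in
+     * BatchControllerStubSettings).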
+ */ + public void createBatch( + com.google.cloud.dataproc.v1.CreateBatchRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateBatchMethod(), responseObserver); + } + + /** + * + * + *
<pre>
+     * Gets the batch workload resource representation.
+     * </pre>
+ */ + public void getBatch( + com.google.cloud.dataproc.v1.GetBatchRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetBatchMethod(), responseObserver); + } + + /** + * + * + *
<pre>
+     * Lists batch workloads.
+     * </pre>
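+     *
+     * <p>Results are paginated: each response holds at most one page of batches plus a
+     * next_page_token that callers echo back in the next request to continue listing.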
+ */ + public void listBatches( + com.google.cloud.dataproc.v1.ListBatchesRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListBatchesMethod(), responseObserver); + } + + /** + * + * + *
<pre>
+     * Deletes the batch workload resource. If the batch is not in a terminal state,
+     * the delete fails and the response returns `FAILED_PRECONDITION`.
+     * </pre>
+ */ + public void deleteBatch( + com.google.cloud.dataproc.v1.DeleteBatchRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteBatchMethod(), responseObserver); + } + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateBatchMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1.CreateBatchRequest, + com.google.longrunning.Operation>(this, METHODID_CREATE_BATCH))) + .addMethod( + getGetBatchMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1.GetBatchRequest, + com.google.cloud.dataproc.v1.Batch>(this, METHODID_GET_BATCH))) + .addMethod( + getListBatchesMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1.ListBatchesRequest, + com.google.cloud.dataproc.v1.ListBatchesResponse>( + this, METHODID_LIST_BATCHES))) + .addMethod( + getDeleteBatchMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1.DeleteBatchRequest, com.google.protobuf.Empty>( + this, METHODID_DELETE_BATCH))) + .build(); + } + } + + /** + * + * + *
+   * The BatchController provides methods to manage batch workloads.
+   * 
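+   * A minimal construction sketch (illustrative only; production code would normally
+   * use the GAPIC BatchControllerClient, which manages channels and credentials):
+   * <pre>{@code
+   * io.grpc.ManagedChannel channel =
+   *     io.grpc.ManagedChannelBuilder.forTarget("dataproc.googleapis.com:443").build();
+   * BatchControllerGrpc.BatchControllerStub asyncStub = BatchControllerGrpc.newStub(channel);
+   * }</pre>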
+ */ + public static final class BatchControllerStub + extends io.grpc.stub.AbstractAsyncStub { + private BatchControllerStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BatchControllerStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BatchControllerStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a batch workload that executes asynchronously.
+     * 
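+     * A hedged usage sketch reusing the asyncStub from the class-level example; the
+     * parent and batch ID values below are illustrative assumptions:
+     * <pre>{@code
+     * com.google.cloud.dataproc.v1.CreateBatchRequest request =
+     *     com.google.cloud.dataproc.v1.CreateBatchRequest.newBuilder()
+     *         .setParent("projects/my-project/locations/us-central1")
+     *         .setBatchId("my-batch")
+     *         .build();
+     * asyncStub.createBatch(
+     *     request,
+     *     new io.grpc.stub.StreamObserver<com.google.longrunning.Operation>() {
+     *       public void onNext(com.google.longrunning.Operation operation) {
+     *         // Track or poll the returned long-running operation here.
+     *       }
+     *       public void onError(Throwable t) {
+     *         // Handle the RPC failure here.
+     *       }
+     *       public void onCompleted() {}
+     *     });
+     * }</pre>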
+ */ + public void createBatch( + com.google.cloud.dataproc.v1.CreateBatchRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateBatchMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Gets the batch workload resource representation.
+     * 
+ */ + public void getBatch( + com.google.cloud.dataproc.v1.GetBatchRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetBatchMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
+     * Lists batch workloads.
+     * 
+ */ + public void listBatches( + com.google.cloud.dataproc.v1.ListBatchesRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListBatchesMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Deletes the batch workload resource. If the batch is not in a terminal
+     * state, the delete fails and the response returns `FAILED_PRECONDITION`.
+     * 
+ */ + public void deleteBatch( + com.google.cloud.dataproc.v1.DeleteBatchRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteBatchMethod(), getCallOptions()), + request, + responseObserver); + } + } + + /** + * + * + *
+   * The BatchController provides methods to manage batch workloads.
+   * 
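+   * A minimal blocking-call sketch (illustrative only; the resource name is a
+   * placeholder and the channel comes from the async-stub example above):
+   * <pre>{@code
+   * BatchControllerGrpc.BatchControllerBlockingStub blockingStub =
+   *     BatchControllerGrpc.newBlockingStub(channel);
+   * com.google.cloud.dataproc.v1.Batch batch =
+   *     blockingStub.getBatch(
+   *         com.google.cloud.dataproc.v1.GetBatchRequest.newBuilder()
+   *             .setName("projects/my-project/locations/us-central1/batches/my-batch")
+   *             .build());
+   * }</pre>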
+ */ + public static final class BatchControllerBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private BatchControllerBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BatchControllerBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BatchControllerBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a batch workload that executes asynchronously.
+     * 
+ */ + public com.google.longrunning.Operation createBatch( + com.google.cloud.dataproc.v1.CreateBatchRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateBatchMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets the batch workload resource representation.
+     * 
+ */ + public com.google.cloud.dataproc.v1.Batch getBatch( + com.google.cloud.dataproc.v1.GetBatchRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetBatchMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Lists batch workloads.
+     * 
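+     * A manual pagination sketch reusing the blockingStub from the class-level
+     * example (field names follow the ListBatches messages added in this change):
+     * <pre>{@code
+     * String pageToken = "";
+     * do {
+     *   com.google.cloud.dataproc.v1.ListBatchesResponse response =
+     *       blockingStub.listBatches(
+     *           com.google.cloud.dataproc.v1.ListBatchesRequest.newBuilder()
+     *               .setParent("projects/my-project/locations/us-central1")
+     *               .setPageToken(pageToken)
+     *               .build());
+     *   for (com.google.cloud.dataproc.v1.Batch b : response.getBatchesList()) {
+     *     // Process each batch workload here.
+     *   }
+     *   pageToken = response.getNextPageToken();
+     * } while (!pageToken.isEmpty());
+     * }</pre>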
+ */ + public com.google.cloud.dataproc.v1.ListBatchesResponse listBatches( + com.google.cloud.dataproc.v1.ListBatchesRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListBatchesMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Deletes the batch workload resource. If the batch is not in a terminal
+     * state, the delete fails and the response returns `FAILED_PRECONDITION`.
+     * 
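+     * A client-side sketch of handling that failure (illustrative only):
+     * <pre>{@code
+     * try {
+     *   blockingStub.deleteBatch(
+     *       com.google.cloud.dataproc.v1.DeleteBatchRequest.newBuilder()
+     *           .setName("projects/my-project/locations/us-central1/batches/my-batch")
+     *           .build());
+     * } catch (io.grpc.StatusRuntimeException e) {
+     *   if (e.getStatus().getCode() == io.grpc.Status.Code.FAILED_PRECONDITION) {
+     *     // The batch is not yet terminal; wait for it to finish and retry.
+     *   }
+     * }
+     * }</pre>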
+ */ + public com.google.protobuf.Empty deleteBatch( + com.google.cloud.dataproc.v1.DeleteBatchRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteBatchMethod(), getCallOptions(), request); + } + } + + /** + * + * + *
+   * The BatchController provides methods to manage batch workloads.
+   * 
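+   * A callback sketch using Guava futures (illustrative only; the channel and
+   * resource name are placeholders):
+   * <pre>{@code
+   * BatchControllerGrpc.BatchControllerFutureStub futureStub =
+   *     BatchControllerGrpc.newFutureStub(channel);
+   * com.google.common.util.concurrent.Futures.addCallback(
+   *     futureStub.getBatch(
+   *         com.google.cloud.dataproc.v1.GetBatchRequest.newBuilder()
+   *             .setName("projects/my-project/locations/us-central1/batches/my-batch")
+   *             .build()),
+   *     new com.google.common.util.concurrent.FutureCallback<com.google.cloud.dataproc.v1.Batch>() {
+   *       public void onSuccess(com.google.cloud.dataproc.v1.Batch batch) {
+   *         // Use the batch here.
+   *       }
+   *       public void onFailure(Throwable t) {
+   *         // Handle the RPC failure here.
+   *       }
+   *     },
+   *     com.google.common.util.concurrent.MoreExecutors.directExecutor());
+   * }</pre>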
+ */ + public static final class BatchControllerFutureStub + extends io.grpc.stub.AbstractFutureStub { + private BatchControllerFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BatchControllerFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BatchControllerFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a batch workload that executes asynchronously.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + createBatch(com.google.cloud.dataproc.v1.CreateBatchRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateBatchMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Gets the batch workload resource representation.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + getBatch(com.google.cloud.dataproc.v1.GetBatchRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetBatchMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Lists batch workloads.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.dataproc.v1.ListBatchesResponse> + listBatches(com.google.cloud.dataproc.v1.ListBatchesRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListBatchesMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Deletes the batch workload resource. If the batch is not in a terminal
+     * state, the delete fails and the response returns `FAILED_PRECONDITION`.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + deleteBatch(com.google.cloud.dataproc.v1.DeleteBatchRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteBatchMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_CREATE_BATCH = 0; + private static final int METHODID_GET_BATCH = 1; + private static final int METHODID_LIST_BATCHES = 2; + private static final int METHODID_DELETE_BATCH = 3; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final BatchControllerImplBase serviceImpl; + private final int methodId; + + MethodHandlers(BatchControllerImplBase serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_CREATE_BATCH: + serviceImpl.createBatch( + (com.google.cloud.dataproc.v1.CreateBatchRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_BATCH: + serviceImpl.getBatch( + (com.google.cloud.dataproc.v1.GetBatchRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_LIST_BATCHES: + serviceImpl.listBatches( + (com.google.cloud.dataproc.v1.ListBatchesRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_DELETE_BATCH: + serviceImpl.deleteBatch( + (com.google.cloud.dataproc.v1.DeleteBatchRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + private abstract static class BatchControllerBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + BatchControllerBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("BatchController"); + } + } + + private static final class BatchControllerFileDescriptorSupplier + extends BatchControllerBaseDescriptorSupplier { + BatchControllerFileDescriptorSupplier() {} + } + + private static final class BatchControllerMethodDescriptorSupplier + extends BatchControllerBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final String methodName; + + BatchControllerMethodDescriptorSupplier(String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = 
serviceDescriptor; + if (result == null) { + synchronized (BatchControllerGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new BatchControllerFileDescriptorSupplier()) + .addMethod(getCreateBatchMethod()) + .addMethod(getGetBatchMethod()) + .addMethod(getListBatchesMethod()) + .addMethod(getDeleteBatchMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Batch.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Batch.java new file mode 100644 index 00000000..56914113 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Batch.java @@ -0,0 +1,6770 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A representation of a batch workload in the service.
+ * 
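+ * Most fields here are output only; a client creating a batch typically sets just one
+ * workload config. A hedged sketch (the Cloud Storage URI is a placeholder, and the
+ * setter names follow this message's batch_config oneof):
+ * <pre>{@code
+ * com.google.cloud.dataproc.v1.Batch batch =
+ *     com.google.cloud.dataproc.v1.Batch.newBuilder()
+ *         .setPysparkBatch(
+ *             com.google.cloud.dataproc.v1.PySparkBatch.newBuilder()
+ *                 .setMainPythonFileUri("gs://my-bucket/job.py")
+ *                 .build())
+ *         .build();
+ * }</pre>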
+ * + * Protobuf type {@code google.cloud.dataproc.v1.Batch} + */ +public final class Batch extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.Batch) + BatchOrBuilder { + private static final long serialVersionUID = 0L; + // Use Batch.newBuilder() to construct. + private Batch(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private Batch() { + name_ = ""; + uuid_ = ""; + state_ = 0; + stateMessage_ = ""; + creator_ = ""; + operation_ = ""; + stateHistory_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new Batch(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private Batch( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + + uuid_ = s; + break; + } + case 26: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (createTime_ != null) { + subBuilder = createTime_.toBuilder(); + } + createTime_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(createTime_); + createTime_ = subBuilder.buildPartial(); + } + + break; + } + case 34: + { + com.google.cloud.dataproc.v1.PySparkBatch.Builder subBuilder = null; + if (batchConfigCase_ == 4) { + subBuilder = ((com.google.cloud.dataproc.v1.PySparkBatch) batchConfig_).toBuilder(); + } + batchConfig_ = + input.readMessage( + com.google.cloud.dataproc.v1.PySparkBatch.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1.PySparkBatch) batchConfig_); + batchConfig_ = subBuilder.buildPartial(); + } + batchConfigCase_ = 4; + break; + } + case 42: + { + com.google.cloud.dataproc.v1.SparkBatch.Builder subBuilder = null; + if (batchConfigCase_ == 5) { + subBuilder = ((com.google.cloud.dataproc.v1.SparkBatch) batchConfig_).toBuilder(); + } + batchConfig_ = + input.readMessage( + com.google.cloud.dataproc.v1.SparkBatch.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1.SparkBatch) batchConfig_); + batchConfig_ = subBuilder.buildPartial(); + } + batchConfigCase_ = 5; + break; + } + case 50: + { + com.google.cloud.dataproc.v1.SparkRBatch.Builder subBuilder = null; + if (batchConfigCase_ == 6) { + subBuilder = ((com.google.cloud.dataproc.v1.SparkRBatch) batchConfig_).toBuilder(); + } + batchConfig_ = + input.readMessage( + com.google.cloud.dataproc.v1.SparkRBatch.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1.SparkRBatch) batchConfig_); + batchConfig_ = subBuilder.buildPartial(); + } + batchConfigCase_ = 6; + 
break; + } + case 58: + { + com.google.cloud.dataproc.v1.SparkSqlBatch.Builder subBuilder = null; + if (batchConfigCase_ == 7) { + subBuilder = + ((com.google.cloud.dataproc.v1.SparkSqlBatch) batchConfig_).toBuilder(); + } + batchConfig_ = + input.readMessage( + com.google.cloud.dataproc.v1.SparkSqlBatch.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1.SparkSqlBatch) batchConfig_); + batchConfig_ = subBuilder.buildPartial(); + } + batchConfigCase_ = 7; + break; + } + case 66: + { + com.google.cloud.dataproc.v1.RuntimeInfo.Builder subBuilder = null; + if (runtimeInfo_ != null) { + subBuilder = runtimeInfo_.toBuilder(); + } + runtimeInfo_ = + input.readMessage( + com.google.cloud.dataproc.v1.RuntimeInfo.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(runtimeInfo_); + runtimeInfo_ = subBuilder.buildPartial(); + } + + break; + } + case 72: + { + int rawValue = input.readEnum(); + + state_ = rawValue; + break; + } + case 82: + { + java.lang.String s = input.readStringRequireUtf8(); + + stateMessage_ = s; + break; + } + case 90: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (stateTime_ != null) { + subBuilder = stateTime_.toBuilder(); + } + stateTime_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(stateTime_); + stateTime_ = subBuilder.buildPartial(); + } + + break; + } + case 98: + { + java.lang.String s = input.readStringRequireUtf8(); + + creator_ = s; + break; + } + case 106: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + labels_ = + com.google.protobuf.MapField.newMapField(LabelsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000001; + } + com.google.protobuf.MapEntry labels__ = + input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + labels_.getMutableMap().put(labels__.getKey(), labels__.getValue()); + break; + } + case 114: + { + com.google.cloud.dataproc.v1.RuntimeConfig.Builder subBuilder = null; + if (runtimeConfig_ != null) { + subBuilder = runtimeConfig_.toBuilder(); + } + runtimeConfig_ = + input.readMessage( + com.google.cloud.dataproc.v1.RuntimeConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(runtimeConfig_); + runtimeConfig_ = subBuilder.buildPartial(); + } + + break; + } + case 122: + { + com.google.cloud.dataproc.v1.EnvironmentConfig.Builder subBuilder = null; + if (environmentConfig_ != null) { + subBuilder = environmentConfig_.toBuilder(); + } + environmentConfig_ = + input.readMessage( + com.google.cloud.dataproc.v1.EnvironmentConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(environmentConfig_); + environmentConfig_ = subBuilder.buildPartial(); + } + + break; + } + case 130: + { + java.lang.String s = input.readStringRequireUtf8(); + + operation_ = s; + break; + } + case 138: + { + if (!((mutable_bitField0_ & 0x00000002) != 0)) { + stateHistory_ = + new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + stateHistory_.add( + input.readMessage( + com.google.cloud.dataproc.v1.Batch.StateHistory.parser(), extensionRegistry)); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) != 0)) { + stateHistory_ = java.util.Collections.unmodifiableList(stateHistory_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_Batch_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField(int number) { + switch (number) { + case 13: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_Batch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.Batch.class, + com.google.cloud.dataproc.v1.Batch.Builder.class); + } + + /** + * + * + *
+   * The batch state.
+   * 
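+   * A hedged sketch of inspecting the state (UNRECOGNIZED covers values added to the
+   * service after this client was generated):
+   * <pre>{@code
+   * switch (batch.getState()) {
+   *   case SUCCEEDED:
+   *     // Terminal: the workload finished successfully.
+   *     break;
+   *   case FAILED:
+   *   case CANCELLED:
+   *     // Terminal: inspect batch.getStateMessage() for details.
+   *     break;
+   *   case UNRECOGNIZED:
+   *   default:
+   *     // Pending, running, cancelling, or unknown to this client version.
+   *     break;
+   * }
+   * }</pre>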
+ * + * Protobuf enum {@code google.cloud.dataproc.v1.Batch.State} + */ + public enum State implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * The batch state is unknown.
+     * 
+ * + * STATE_UNSPECIFIED = 0; + */ + STATE_UNSPECIFIED(0), + /** + * + * + *
+     * The batch has been created and is waiting to run.
+     * 
+ * + * PENDING = 1; + */ + PENDING(1), + /** + * + * + *
+     * The batch is running.
+     * 
+ * + * RUNNING = 2; + */ + RUNNING(2), + /** + * + * + *
+     * The batch is being cancelled.
+     * 
+ * + * CANCELLING = 3; + */ + CANCELLING(3), + /** + * + * + *
+     * The batch cancellation was successful.
+     * 
+ * + * CANCELLED = 4; + */ + CANCELLED(4), + /** + * + * + *
+     * The batch completed successfully.
+     * 
+ * + * SUCCEEDED = 5; + */ + SUCCEEDED(5), + /** + * + * + *
+     * The batch is no longer running due to an error.
+     * 
+ * + * FAILED = 6; + */ + FAILED(6), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * The batch state is unknown.
+     * 
+ * + * STATE_UNSPECIFIED = 0; + */ + public static final int STATE_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+     * The batch has been created and is waiting to run.
+     * 
+ * + * PENDING = 1; + */ + public static final int PENDING_VALUE = 1; + /** + * + * + *
+     * The batch is running.
+     * 
+ * + * RUNNING = 2; + */ + public static final int RUNNING_VALUE = 2; + /** + * + * + *
+     * The batch is being cancelled.
+     * 
+ * + * CANCELLING = 3; + */ + public static final int CANCELLING_VALUE = 3; + /** + * + * + *
+     * The batch cancellation was successful.
+     * 
+ * + * CANCELLED = 4; + */ + public static final int CANCELLED_VALUE = 4; + /** + * + * + *
+     * The batch completed successfully.
+     * 
+ * + * SUCCEEDED = 5; + */ + public static final int SUCCEEDED_VALUE = 5; + /** + * + * + *
+     * The batch is no longer running due to an error.
+     * 
+ * + * FAILED = 6; + */ + public static final int FAILED_VALUE = 6; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static State valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static State forNumber(int value) { + switch (value) { + case 0: + return STATE_UNSPECIFIED; + case 1: + return PENDING; + case 2: + return RUNNING; + case 3: + return CANCELLING; + case 4: + return CANCELLED; + case 5: + return SUCCEEDED; + case 6: + return FAILED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.dataproc.v1.Batch.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private State(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1.Batch.State) + } + + public interface StateHistoryOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.Batch.StateHistory) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Output only. The state of the batch at this point in history.
+     * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + int getStateValue(); + /** + * + * + *
+     * Output only. The state of the batch at this point in history.
+     * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + com.google.cloud.dataproc.v1.Batch.State getState(); + + /** + * + * + *
+     * Output only. Details about the state at this point in history.
+     * 
+ * + * string state_message = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The stateMessage. + */ + java.lang.String getStateMessage(); + /** + * + * + *
+     * Output only. Details about the state at this point in history.
+     * 
+ * + * string state_message = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for stateMessage. + */ + com.google.protobuf.ByteString getStateMessageBytes(); + + /** + * + * + *
+     * Output only. The time when the batch entered the historical state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the stateStartTime field is set. + */ + boolean hasStateStartTime(); + /** + * + * + *
+     * Output only. The time when the batch entered the historical state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The stateStartTime. + */ + com.google.protobuf.Timestamp getStateStartTime(); + /** + * + * + *
+     * Output only. The time when the batch entered the historical state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getStateStartTimeOrBuilder(); + } + /** + * + * + *
+   * Historical state information.
+   * 
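+   * A sketch of walking the history (assumes the repeated state_history accessor
+   * that this change generates on Batch):
+   * <pre>{@code
+   * for (com.google.cloud.dataproc.v1.Batch.StateHistory entry : batch.getStateHistoryList()) {
+   *   System.out.println(entry.getState() + " since " + entry.getStateStartTime());
+   * }
+   * }</pre>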
+ * + * Protobuf type {@code google.cloud.dataproc.v1.Batch.StateHistory} + */ + public static final class StateHistory extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.Batch.StateHistory) + StateHistoryOrBuilder { + private static final long serialVersionUID = 0L; + // Use StateHistory.newBuilder() to construct. + private StateHistory(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StateHistory() { + state_ = 0; + stateMessage_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StateHistory(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private StateHistory( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + int rawValue = input.readEnum(); + + state_ = rawValue; + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + + stateMessage_ = s; + break; + } + case 26: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (stateStartTime_ != null) { + subBuilder = stateStartTime_.toBuilder(); + } + stateStartTime_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(stateStartTime_); + stateStartTime_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_Batch_StateHistory_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_Batch_StateHistory_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.Batch.StateHistory.class, + com.google.cloud.dataproc.v1.Batch.StateHistory.Builder.class); + } + + public static final int STATE_FIELD_NUMBER = 1; + private int state_; + /** + * + * + *
+     * Output only. The state of the batch at this point in history.
+     * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + /** + * + * + *
+     * Output only. The state of the batch at this point in history.
+     * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch.State getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1.Batch.State result = + com.google.cloud.dataproc.v1.Batch.State.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1.Batch.State.UNRECOGNIZED : result; + } + + public static final int STATE_MESSAGE_FIELD_NUMBER = 2; + private volatile java.lang.Object stateMessage_; + /** + * + * + *
+     * Output only. Details about the state at this point in history.
+     * 
+ * + * string state_message = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The stateMessage. + */ + @java.lang.Override + public java.lang.String getStateMessage() { + java.lang.Object ref = stateMessage_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + stateMessage_ = s; + return s; + } + } + /** + * + * + *
+     * Output only. Details about the state at this point in history.
+     * 
+ * + * string state_message = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for stateMessage. + */ + @java.lang.Override + public com.google.protobuf.ByteString getStateMessageBytes() { + java.lang.Object ref = stateMessage_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + stateMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATE_START_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp stateStartTime_; + /** + * + * + *
+     * Output only. The time when the batch entered the historical state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the stateStartTime field is set. + */ + @java.lang.Override + public boolean hasStateStartTime() { + return stateStartTime_ != null; + } + /** + * + * + *
+     * Output only. The time when the batch entered the historical state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The stateStartTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getStateStartTime() { + return stateStartTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : stateStartTime_; + } + /** + * + * + *
+     * Output only. The time when the batch entered the historical state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getStateStartTimeOrBuilder() { + return getStateStartTime(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (state_ != com.google.cloud.dataproc.v1.Batch.State.STATE_UNSPECIFIED.getNumber()) { + output.writeEnum(1, state_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(stateMessage_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, stateMessage_); + } + if (stateStartTime_ != null) { + output.writeMessage(3, getStateStartTime()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (state_ != com.google.cloud.dataproc.v1.Batch.State.STATE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, state_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(stateMessage_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, stateMessage_); + } + if (stateStartTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getStateStartTime()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.Batch.StateHistory)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.Batch.StateHistory other = + (com.google.cloud.dataproc.v1.Batch.StateHistory) obj; + + if (state_ != other.state_) return false; + if (!getStateMessage().equals(other.getStateMessage())) return false; + if (hasStateStartTime() != other.hasStateStartTime()) return false; + if (hasStateStartTime()) { + if (!getStateStartTime().equals(other.getStateStartTime())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + hash = (37 * hash) + STATE_MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getStateMessage().hashCode(); + if (hasStateStartTime()) { + hash = (37 * hash) + STATE_START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStateStartTime().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.Batch.StateHistory parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.Batch.StateHistory parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + 
public static com.google.cloud.dataproc.v1.Batch.StateHistory parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.Batch.StateHistory parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.Batch.StateHistory parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.Batch.StateHistory parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.Batch.StateHistory parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.Batch.StateHistory parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.Batch.StateHistory parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.Batch.StateHistory parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.Batch.StateHistory parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.Batch.StateHistory parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.Batch.StateHistory prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Historical state information.
+     * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.Batch.StateHistory} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.Batch.StateHistory) + com.google.cloud.dataproc.v1.Batch.StateHistoryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_Batch_StateHistory_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_Batch_StateHistory_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.Batch.StateHistory.class, + com.google.cloud.dataproc.v1.Batch.StateHistory.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.Batch.StateHistory.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + state_ = 0; + + stateMessage_ = ""; + + if (stateStartTimeBuilder_ == null) { + stateStartTime_ = null; + } else { + stateStartTime_ = null; + stateStartTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_Batch_StateHistory_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch.StateHistory getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.Batch.StateHistory.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch.StateHistory build() { + com.google.cloud.dataproc.v1.Batch.StateHistory result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch.StateHistory buildPartial() { + com.google.cloud.dataproc.v1.Batch.StateHistory result = + new com.google.cloud.dataproc.v1.Batch.StateHistory(this); + result.state_ = state_; + result.stateMessage_ = stateMessage_; + if (stateStartTimeBuilder_ == null) { + result.stateStartTime_ = stateStartTime_; + } else { + result.stateStartTime_ = stateStartTimeBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.Batch.StateHistory) { + return mergeFrom((com.google.cloud.dataproc.v1.Batch.StateHistory) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.Batch.StateHistory other) { + if (other == com.google.cloud.dataproc.v1.Batch.StateHistory.getDefaultInstance()) + return this; + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + if (!other.getStateMessage().isEmpty()) { + stateMessage_ = other.stateMessage_; + onChanged(); + } + if (other.hasStateStartTime()) { + mergeStateStartTime(other.getStateStartTime()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.Batch.StateHistory parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.dataproc.v1.Batch.StateHistory) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int state_ = 0; + /** + * + * + *
+       * Output only. The state of the batch at this point in history.
+       * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + /** + * + * + *
+       * Output only. The state of the batch at this point in history.
+       * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for state to set. + * @return This builder for chaining. + */ + public Builder setStateValue(int value) { + + state_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Output only. The state of the batch at this point in history.
+       * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch.State getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1.Batch.State result = + com.google.cloud.dataproc.v1.Batch.State.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1.Batch.State.UNRECOGNIZED : result; + } + /** + * + * + *
+       * Output only. The state of the batch at this point in history.
+       * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The state to set. + * @return This builder for chaining. + */ + public Builder setState(com.google.cloud.dataproc.v1.Batch.State value) { + if (value == null) { + throw new NullPointerException(); + } + + state_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+       * Output only. The state of the batch at this point in history.
+       * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearState() { + + state_ = 0; + onChanged(); + return this; + } + + private java.lang.Object stateMessage_ = ""; + /** + * + * + *
+       * Output only. Details about the state at this point in history.
+       * 
+ * + * string state_message = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The stateMessage. + */ + public java.lang.String getStateMessage() { + java.lang.Object ref = stateMessage_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + stateMessage_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+       * Output only. Details about the state at this point in history.
+       * 
+ * + * string state_message = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for stateMessage. + */ + public com.google.protobuf.ByteString getStateMessageBytes() { + java.lang.Object ref = stateMessage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + stateMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * Output only. Details about the state at this point in history.
+       * 
+ * + * string state_message = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The stateMessage to set. + * @return This builder for chaining. + */ + public Builder setStateMessage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + stateMessage_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Output only. Details about the state at this point in history.
+       * 
+ * + * string state_message = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearStateMessage() { + + stateMessage_ = getDefaultInstance().getStateMessage(); + onChanged(); + return this; + } + /** + * + * + *
+       * Output only. Details about the state at this point in history.
+       * 
+ * + * string state_message = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for stateMessage to set. + * @return This builder for chaining. + */ + public Builder setStateMessageBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + stateMessage_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp stateStartTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + stateStartTimeBuilder_; + /** + * + * + *
+       * Output only. The time when the batch entered the historical state.
+       * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the stateStartTime field is set. + */ + public boolean hasStateStartTime() { + return stateStartTimeBuilder_ != null || stateStartTime_ != null; + } + /** + * + * + *
+       * Output only. The time when the batch entered the historical state.
+       * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The stateStartTime. + */ + public com.google.protobuf.Timestamp getStateStartTime() { + if (stateStartTimeBuilder_ == null) { + return stateStartTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : stateStartTime_; + } else { + return stateStartTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+       * Output only. The time when the batch entered the historical state.
+       * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStateStartTime(com.google.protobuf.Timestamp value) { + if (stateStartTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + stateStartTime_ = value; + onChanged(); + } else { + stateStartTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * Output only. The time when the batch entered the historical state.
+       * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStateStartTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (stateStartTimeBuilder_ == null) { + stateStartTime_ = builderForValue.build(); + onChanged(); + } else { + stateStartTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * Output only. The time when the batch entered the historical state.
+       * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeStateStartTime(com.google.protobuf.Timestamp value) { + if (stateStartTimeBuilder_ == null) { + if (stateStartTime_ != null) { + stateStartTime_ = + com.google.protobuf.Timestamp.newBuilder(stateStartTime_) + .mergeFrom(value) + .buildPartial(); + } else { + stateStartTime_ = value; + } + onChanged(); + } else { + stateStartTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * Output only. The time when the batch entered the historical state.
+       * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearStateStartTime() { + if (stateStartTimeBuilder_ == null) { + stateStartTime_ = null; + onChanged(); + } else { + stateStartTime_ = null; + stateStartTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * Output only. The time when the batch entered the historical state.
+       * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getStateStartTimeBuilder() { + + onChanged(); + return getStateStartTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * Output only. The time when the batch entered the historical state.
+       * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getStateStartTimeOrBuilder() { + if (stateStartTimeBuilder_ != null) { + return stateStartTimeBuilder_.getMessageOrBuilder(); + } else { + return stateStartTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : stateStartTime_; + } + } + /** + * + * + *
+       * Output only. The time when the batch entered the historical state.
+       * 
+ * + * + * .google.protobuf.Timestamp state_start_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getStateStartTimeFieldBuilder() { + if (stateStartTimeBuilder_ == null) { + stateStartTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getStateStartTime(), getParentForChildren(), isClean()); + stateStartTime_ = null; + } + return stateStartTimeBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.Batch.StateHistory) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.Batch.StateHistory) + private static final com.google.cloud.dataproc.v1.Batch.StateHistory DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.Batch.StateHistory(); + } + + public static com.google.cloud.dataproc.v1.Batch.StateHistory getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StateHistory parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StateHistory(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch.StateHistory getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int batchConfigCase_ = 0; + private java.lang.Object batchConfig_; + + public enum BatchConfigCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + PYSPARK_BATCH(4), + SPARK_BATCH(5), + SPARK_R_BATCH(6), + SPARK_SQL_BATCH(7), + BATCHCONFIG_NOT_SET(0); + private final int value; + + private BatchConfigCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static BatchConfigCase valueOf(int value) { + return forNumber(value); + } + + public static BatchConfigCase forNumber(int value) { + switch (value) { + case 4: + return PYSPARK_BATCH; + case 5: + return SPARK_BATCH; + case 6: + return SPARK_R_BATCH; + case 7: + return SPARK_SQL_BATCH; + case 0: + return BATCHCONFIG_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public BatchConfigCase getBatchConfigCase() { + return BatchConfigCase.forNumber(batchConfigCase_); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+   * Output only. The resource name of the batch.
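+   * The name follows the pattern
+   * `projects/{project}/locations/{location}/batches/{batch}`. As an
+   * illustrative sketch only (assuming the `BatchName` helper generated in
+   * this release), it can be split into its components:
+   *
+   *   // Illustrative; BatchName.parse(...) is the generated resource-name helper
+   *   BatchName batchName = BatchName.parse(batch.getName());
+   *   String project = batchName.getProject();
+   *   String location = batchName.getLocation();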
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Output only. The resource name of the batch.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int UUID_FIELD_NUMBER = 2; + private volatile java.lang.Object uuid_; + /** + * + * + *
+   * Output only. A batch UUID (Universally Unique Identifier). The service
+   * generates this value when it creates the batch.
+   * 
+ * + * string uuid = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The uuid. + */ + @java.lang.Override + public java.lang.String getUuid() { + java.lang.Object ref = uuid_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + uuid_ = s; + return s; + } + } + /** + * + * + *
+   * Output only. A batch UUID (Universally Unique Identifier). The service
+   * generates this value when it creates the batch.
+   * 
+ * + * string uuid = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for uuid. + */ + @java.lang.Override + public com.google.protobuf.ByteString getUuidBytes() { + java.lang.Object ref = uuid_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + uuid_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CREATE_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp createTime_; + /** + * + * + *
+   * Output only. The time when the batch was created.
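+   * As an illustrative sketch (assuming `protobuf-java-util` is on the
+   * classpath), the timestamp can be converted to epoch milliseconds:
+   *
+   *   // Illustrative only; Timestamps is com.google.protobuf.util.Timestamps
+   *   long createdMillis =
+   *       com.google.protobuf.util.Timestamps.toMillis(batch.getCreateTime());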
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return createTime_ != null; + } + /** + * + * + *
+   * Output only. The time when the batch was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + /** + * + * + *
+   * Output only. The time when the batch was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return getCreateTime(); + } + + public static final int PYSPARK_BATCH_FIELD_NUMBER = 4; + /** + * + * + *
+   * Optional. PySpark batch config.
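+   * Only one of the batch config fields is set at a time. An illustrative
+   * sketch of dispatching on the oneof case (the `handle` methods are
+   * hypothetical placeholders):
+   *
+   *   switch (batch.getBatchConfigCase()) {
+   *     case PYSPARK_BATCH:
+   *       handle(batch.getPysparkBatch()); // handle(...) is hypothetical
+   *       break;
+   *     case SPARK_BATCH:
+   *       handle(batch.getSparkBatch());
+   *       break;
+   *     case SPARK_R_BATCH:
+   *       handle(batch.getSparkRBatch());
+   *       break;
+   *     case SPARK_SQL_BATCH:
+   *       handle(batch.getSparkSqlBatch());
+   *       break;
+   *     case BATCHCONFIG_NOT_SET:
+   *       break;
+   *   }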
+   * 
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the pysparkBatch field is set. + */ + @java.lang.Override + public boolean hasPysparkBatch() { + return batchConfigCase_ == 4; + } + /** + * + * + *
+   * Optional. PySpark batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The pysparkBatch. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.PySparkBatch getPysparkBatch() { + if (batchConfigCase_ == 4) { + return (com.google.cloud.dataproc.v1.PySparkBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.PySparkBatch.getDefaultInstance(); + } + /** + * + * + *
+   * Optional. PySpark batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.PySparkBatchOrBuilder getPysparkBatchOrBuilder() { + if (batchConfigCase_ == 4) { + return (com.google.cloud.dataproc.v1.PySparkBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.PySparkBatch.getDefaultInstance(); + } + + public static final int SPARK_BATCH_FIELD_NUMBER = 5; + /** + * + * + *
+   * Optional. Spark batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the sparkBatch field is set. + */ + @java.lang.Override + public boolean hasSparkBatch() { + return batchConfigCase_ == 5; + } + /** + * + * + *
+   * Optional. Spark batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The sparkBatch. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkBatch getSparkBatch() { + if (batchConfigCase_ == 5) { + return (com.google.cloud.dataproc.v1.SparkBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.SparkBatch.getDefaultInstance(); + } + /** + * + * + *
+   * Optional. Spark batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkBatchOrBuilder getSparkBatchOrBuilder() { + if (batchConfigCase_ == 5) { + return (com.google.cloud.dataproc.v1.SparkBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.SparkBatch.getDefaultInstance(); + } + + public static final int SPARK_R_BATCH_FIELD_NUMBER = 6; + /** + * + * + *
+   * Optional. SparkR batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the sparkRBatch field is set. + */ + @java.lang.Override + public boolean hasSparkRBatch() { + return batchConfigCase_ == 6; + } + /** + * + * + *
+   * Optional. SparkR batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The sparkRBatch. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkRBatch getSparkRBatch() { + if (batchConfigCase_ == 6) { + return (com.google.cloud.dataproc.v1.SparkRBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.SparkRBatch.getDefaultInstance(); + } + /** + * + * + *
+   * Optional. SparkR batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkRBatchOrBuilder getSparkRBatchOrBuilder() { + if (batchConfigCase_ == 6) { + return (com.google.cloud.dataproc.v1.SparkRBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.SparkRBatch.getDefaultInstance(); + } + + public static final int SPARK_SQL_BATCH_FIELD_NUMBER = 7; + /** + * + * + *
+   * Optional. SparkSql batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the sparkSqlBatch field is set. + */ + @java.lang.Override + public boolean hasSparkSqlBatch() { + return batchConfigCase_ == 7; + } + /** + * + * + *
+   * Optional. SparkSql batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The sparkSqlBatch. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkSqlBatch getSparkSqlBatch() { + if (batchConfigCase_ == 7) { + return (com.google.cloud.dataproc.v1.SparkSqlBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.SparkSqlBatch.getDefaultInstance(); + } + /** + * + * + *
+   * Optional. SparkSql batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkSqlBatchOrBuilder getSparkSqlBatchOrBuilder() { + if (batchConfigCase_ == 7) { + return (com.google.cloud.dataproc.v1.SparkSqlBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.SparkSqlBatch.getDefaultInstance(); + } + + public static final int RUNTIME_INFO_FIELD_NUMBER = 8; + private com.google.cloud.dataproc.v1.RuntimeInfo runtimeInfo_; + /** + * + * + *
+   * Output only. Runtime information about batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the runtimeInfo field is set. + */ + @java.lang.Override + public boolean hasRuntimeInfo() { + return runtimeInfo_ != null; + } + /** + * + * + *
+   * Output only. Runtime information about batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The runtimeInfo. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.RuntimeInfo getRuntimeInfo() { + return runtimeInfo_ == null + ? com.google.cloud.dataproc.v1.RuntimeInfo.getDefaultInstance() + : runtimeInfo_; + } + /** + * + * + *
+   * Output only. Runtime information about batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.RuntimeInfoOrBuilder getRuntimeInfoOrBuilder() { + return getRuntimeInfo(); + } + + public static final int STATE_FIELD_NUMBER = 9; + private int state_; + /** + * + * + *
+   * Output only. The state of the batch.
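+   * Enum values unknown to this client version surface as `UNRECOGNIZED`;
+   * the raw wire value remains available. An illustrative sketch:
+   *
+   *   Batch.State state = batch.getState();
+   *   if (state == Batch.State.UNRECOGNIZED) {
+   *     int raw = batch.getStateValue(); // numeric value from the wire
+   *   }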
+   * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + /** + * + * + *
+   * Output only. The state of the batch.
+   * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch.State getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1.Batch.State result = + com.google.cloud.dataproc.v1.Batch.State.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1.Batch.State.UNRECOGNIZED : result; + } + + public static final int STATE_MESSAGE_FIELD_NUMBER = 10; + private volatile java.lang.Object stateMessage_; + /** + * + * + *
+   * Output only. Batch state details, such as a failure
+   * description if the state is `FAILED`.
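+   * An illustrative sketch of surfacing the failure description:
+   *
+   *   if (batch.getState() == Batch.State.FAILED) {
+   *     System.err.println("Batch failed: " + batch.getStateMessage());
+   *   }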
+   * 
+ * + * string state_message = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The stateMessage. + */ + @java.lang.Override + public java.lang.String getStateMessage() { + java.lang.Object ref = stateMessage_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + stateMessage_ = s; + return s; + } + } + /** + * + * + *
+   * Output only. Batch state details, such as a failure
+   * description if the state is `FAILED`.
+   * 
+ * + * string state_message = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for stateMessage. + */ + @java.lang.Override + public com.google.protobuf.ByteString getStateMessageBytes() { + java.lang.Object ref = stateMessage_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + stateMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATE_TIME_FIELD_NUMBER = 11; + private com.google.protobuf.Timestamp stateTime_; + /** + * + * + *
+   * Output only. The time when the batch entered its current state.
+   * 
+ * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the stateTime field is set. + */ + @java.lang.Override + public boolean hasStateTime() { + return stateTime_ != null; + } + /** + * + * + *
+   * Output only. The time when the batch entered its current state.
+   * 
+ * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The stateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getStateTime() { + return stateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : stateTime_; + } + /** + * + * + *
+   * Output only. The time when the batch entered its current state.
+   * 
+ * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getStateTimeOrBuilder() { + return getStateTime(); + } + + public static final int CREATOR_FIELD_NUMBER = 12; + private volatile java.lang.Object creator_; + /** + * + * + *
+   * Output only. The email address of the user who created the batch.
+   * 
+ * + * string creator = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The creator. + */ + @java.lang.Override + public java.lang.String getCreator() { + java.lang.Object ref = creator_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + creator_ = s; + return s; + } + } + /** + * + * + *
+   * Output only. The email address of the user who created the batch.
+   * 
+ * + * string creator = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for creator. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCreatorBytes() { + java.lang.Object ref = creator_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + creator_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LABELS_FIELD_NUMBER = 13; + + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_Batch_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + /** + * + * + *
+   * Optional. The labels to associate with this batch.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC
+   * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+   * associated with a batch.
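+   * An illustrative sketch of attaching labels through the builder
+   * (the keys and values shown are placeholders):
+   *
+   *   Batch labeled = batch.toBuilder()
+   *       .putLabels("env", "dev")        // key and value within 1 to 63 chars
+   *       .putLabels("team", "analytics")
+   *       .build();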
+   * 
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + return internalGetLabels().getMap().containsKey(key); + } + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + /** + * + * + *
+   * Optional. The labels to associate with this batch.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC
+   * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+   * associated with a batch.
+   * 
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + * + * + *
+   * Optional. The labels to associate with this batch.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC
+   * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+   * associated with a batch.
+   * 
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.lang.String getLabelsOrDefault(java.lang.String key, java.lang.String defaultValue) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+   * Optional. The labels to associate with this batch.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC
+   * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+   * associated with a batch.
+   * 
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int RUNTIME_CONFIG_FIELD_NUMBER = 14; + private com.google.cloud.dataproc.v1.RuntimeConfig runtimeConfig_; + /** + * + * + *
+   * Optional. Runtime configuration for the batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the runtimeConfig field is set. + */ + @java.lang.Override + public boolean hasRuntimeConfig() { + return runtimeConfig_ != null; + } + /** + * + * + *
+   * Optional. Runtime configuration for the batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The runtimeConfig. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.RuntimeConfig getRuntimeConfig() { + return runtimeConfig_ == null + ? com.google.cloud.dataproc.v1.RuntimeConfig.getDefaultInstance() + : runtimeConfig_; + } + /** + * + * + *
+   * Optional. Runtime configuration for the batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.RuntimeConfigOrBuilder getRuntimeConfigOrBuilder() { + return getRuntimeConfig(); + } + + public static final int ENVIRONMENT_CONFIG_FIELD_NUMBER = 15; + private com.google.cloud.dataproc.v1.EnvironmentConfig environmentConfig_; + /** + * + * + *
+   * Optional. Environment configuration for the batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the environmentConfig field is set. + */ + @java.lang.Override + public boolean hasEnvironmentConfig() { + return environmentConfig_ != null; + } + /** + * + * + *
+   * Optional. Environment configuration for the batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The environmentConfig. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.EnvironmentConfig getEnvironmentConfig() { + return environmentConfig_ == null + ? com.google.cloud.dataproc.v1.EnvironmentConfig.getDefaultInstance() + : environmentConfig_; + } + /** + * + * + *
+   * Optional. Environment configuration for the batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.EnvironmentConfigOrBuilder getEnvironmentConfigOrBuilder() { + return getEnvironmentConfig(); + } + + public static final int OPERATION_FIELD_NUMBER = 16; + private volatile java.lang.Object operation_; + /** + * + * + *
+   * Output only. The resource name of the operation associated with this batch.
+   * 
+ * + * string operation = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The operation. + */ + @java.lang.Override + public java.lang.String getOperation() { + java.lang.Object ref = operation_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operation_ = s; + return s; + } + } + /** + * + * + *
+   * Output only. The resource name of the operation associated with this batch.
+   * 
+ * + * string operation = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for operation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOperationBytes() { + java.lang.Object ref = operation_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operation_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATE_HISTORY_FIELD_NUMBER = 17; + private java.util.List stateHistory_; + /** + * + * + *
+   * Output only. Historical state information for the batch.
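+   * An illustrative sketch of walking the recorded state transitions:
+   *
+   *   for (Batch.StateHistory entry : batch.getStateHistoryList()) {
+   *     System.out.println(
+   *         entry.getState() + " since " + entry.getStateStartTime());
+   *   }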
+   * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List getStateHistoryList() { + return stateHistory_; + } + /** + * + * + *
+   * Output only. Historical state information for the batch.
+   * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getStateHistoryOrBuilderList() { + return stateHistory_; + } + /** + * + * + *
+   * Output only. Historical state information for the batch.
+   * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getStateHistoryCount() { + return stateHistory_.size(); + } + /** + * + * + *
+   * Output only. Historical state information for the batch.
+   * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch.StateHistory getStateHistory(int index) { + return stateHistory_.get(index); + } + /** + * + * + *
+   * Output only. Historical state information for the batch.
+   * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch.StateHistoryOrBuilder getStateHistoryOrBuilder( + int index) { + return stateHistory_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(uuid_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, uuid_); + } + if (createTime_ != null) { + output.writeMessage(3, getCreateTime()); + } + if (batchConfigCase_ == 4) { + output.writeMessage(4, (com.google.cloud.dataproc.v1.PySparkBatch) batchConfig_); + } + if (batchConfigCase_ == 5) { + output.writeMessage(5, (com.google.cloud.dataproc.v1.SparkBatch) batchConfig_); + } + if (batchConfigCase_ == 6) { + output.writeMessage(6, (com.google.cloud.dataproc.v1.SparkRBatch) batchConfig_); + } + if (batchConfigCase_ == 7) { + output.writeMessage(7, (com.google.cloud.dataproc.v1.SparkSqlBatch) batchConfig_); + } + if (runtimeInfo_ != null) { + output.writeMessage(8, getRuntimeInfo()); + } + if (state_ != com.google.cloud.dataproc.v1.Batch.State.STATE_UNSPECIFIED.getNumber()) { + output.writeEnum(9, state_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(stateMessage_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 10, stateMessage_); + } + if (stateTime_ != null) { + output.writeMessage(11, getStateTime()); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(creator_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 12, creator_); + } + com.google.protobuf.GeneratedMessageV3.serializeStringMapTo( + output, internalGetLabels(), LabelsDefaultEntryHolder.defaultEntry, 13); + if (runtimeConfig_ != null) { + output.writeMessage(14, getRuntimeConfig()); + } + if (environmentConfig_ != null) { + output.writeMessage(15, getEnvironmentConfig()); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(operation_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 16, operation_); + } + for (int i = 0; i < stateHistory_.size(); i++) { + output.writeMessage(17, stateHistory_.get(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(uuid_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, uuid_); + } + if (createTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCreateTime()); + } + if (batchConfigCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.cloud.dataproc.v1.PySparkBatch) batchConfig_); + } + if (batchConfigCase_ == 5) { + size += + 
com.google.protobuf.CodedOutputStream.computeMessageSize( + 5, (com.google.cloud.dataproc.v1.SparkBatch) batchConfig_); + } + if (batchConfigCase_ == 6) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 6, (com.google.cloud.dataproc.v1.SparkRBatch) batchConfig_); + } + if (batchConfigCase_ == 7) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 7, (com.google.cloud.dataproc.v1.SparkSqlBatch) batchConfig_); + } + if (runtimeInfo_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getRuntimeInfo()); + } + if (state_ != com.google.cloud.dataproc.v1.Batch.State.STATE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(9, state_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(stateMessage_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(10, stateMessage_); + } + if (stateTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(11, getStateTime()); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(creator_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(12, creator_); + } + for (java.util.Map.Entry entry : + internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry labels__ = + LabelsDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(13, labels__); + } + if (runtimeConfig_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(14, getRuntimeConfig()); + } + if (environmentConfig_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(15, getEnvironmentConfig()); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(operation_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(16, operation_); + } + for (int i = 0; i < stateHistory_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(17, stateHistory_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.Batch)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.Batch other = (com.google.cloud.dataproc.v1.Batch) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUuid().equals(other.getUuid())) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasRuntimeInfo() != other.hasRuntimeInfo()) return false; + if (hasRuntimeInfo()) { + if (!getRuntimeInfo().equals(other.getRuntimeInfo())) return false; + } + if (state_ != other.state_) return false; + if (!getStateMessage().equals(other.getStateMessage())) return false; + if (hasStateTime() != other.hasStateTime()) return false; + if (hasStateTime()) { + if (!getStateTime().equals(other.getStateTime())) return false; + } + if (!getCreator().equals(other.getCreator())) return false; + if (!internalGetLabels().equals(other.internalGetLabels())) return false; + if (hasRuntimeConfig() != other.hasRuntimeConfig()) return false; + if (hasRuntimeConfig()) { + if (!getRuntimeConfig().equals(other.getRuntimeConfig())) return false; + } + if (hasEnvironmentConfig() != 
other.hasEnvironmentConfig()) return false; + if (hasEnvironmentConfig()) { + if (!getEnvironmentConfig().equals(other.getEnvironmentConfig())) return false; + } + if (!getOperation().equals(other.getOperation())) return false; + if (!getStateHistoryList().equals(other.getStateHistoryList())) return false; + if (!getBatchConfigCase().equals(other.getBatchConfigCase())) return false; + switch (batchConfigCase_) { + case 4: + if (!getPysparkBatch().equals(other.getPysparkBatch())) return false; + break; + case 5: + if (!getSparkBatch().equals(other.getSparkBatch())) return false; + break; + case 6: + if (!getSparkRBatch().equals(other.getSparkRBatch())) return false; + break; + case 7: + if (!getSparkSqlBatch().equals(other.getSparkSqlBatch())) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + UUID_FIELD_NUMBER; + hash = (53 * hash) + getUuid().hashCode(); + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasRuntimeInfo()) { + hash = (37 * hash) + RUNTIME_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRuntimeInfo().hashCode(); + } + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + hash = (37 * hash) + STATE_MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getStateMessage().hashCode(); + if (hasStateTime()) { + hash = (37 * hash) + STATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStateTime().hashCode(); + } + hash = (37 * hash) + CREATOR_FIELD_NUMBER; + hash = (53 * hash) + getCreator().hashCode(); + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetLabels().hashCode(); + } + if (hasRuntimeConfig()) { + hash = (37 * hash) + RUNTIME_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getRuntimeConfig().hashCode(); + } + if (hasEnvironmentConfig()) { + hash = (37 * hash) + ENVIRONMENT_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getEnvironmentConfig().hashCode(); + } + hash = (37 * hash) + OPERATION_FIELD_NUMBER; + hash = (53 * hash) + getOperation().hashCode(); + if (getStateHistoryCount() > 0) { + hash = (37 * hash) + STATE_HISTORY_FIELD_NUMBER; + hash = (53 * hash) + getStateHistoryList().hashCode(); + } + switch (batchConfigCase_) { + case 4: + hash = (37 * hash) + PYSPARK_BATCH_FIELD_NUMBER; + hash = (53 * hash) + getPysparkBatch().hashCode(); + break; + case 5: + hash = (37 * hash) + SPARK_BATCH_FIELD_NUMBER; + hash = (53 * hash) + getSparkBatch().hashCode(); + break; + case 6: + hash = (37 * hash) + SPARK_R_BATCH_FIELD_NUMBER; + hash = (53 * hash) + getSparkRBatch().hashCode(); + break; + case 7: + hash = (37 * hash) + SPARK_SQL_BATCH_FIELD_NUMBER; + hash = (53 * hash) + getSparkSqlBatch().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.Batch parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.Batch parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.Batch parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.Batch parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.Batch parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.Batch parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.Batch parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.Batch parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.Batch parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.Batch parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.Batch parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.Batch parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.Batch prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A representation of a batch workload in the service.
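+   * An illustrative sketch of constructing and round-tripping a message
+   * (the URI is a placeholder):
+   *
+   *   Batch batch =
+   *       Batch.newBuilder()
+   *           .setPysparkBatch(
+   *               PySparkBatch.newBuilder()
+   *                   .setMainPythonFileUri("gs://my-bucket/job.py"))
+   *           .build();
+   *   byte[] bytes = batch.toByteArray();
+   *   Batch parsed = Batch.parseFrom(bytes); // throws InvalidProtocolBufferException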
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.Batch} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.Batch) + com.google.cloud.dataproc.v1.BatchOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_Batch_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField(int number) { + switch (number) { + case 13: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField(int number) { + switch (number) { + case 13: + return internalGetMutableLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_Batch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.Batch.class, + com.google.cloud.dataproc.v1.Batch.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.Batch.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getStateHistoryFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + uuid_ = ""; + + if (createTimeBuilder_ == null) { + createTime_ = null; + } else { + createTime_ = null; + createTimeBuilder_ = null; + } + if (runtimeInfoBuilder_ == null) { + runtimeInfo_ = null; + } else { + runtimeInfo_ = null; + runtimeInfoBuilder_ = null; + } + state_ = 0; + + stateMessage_ = ""; + + if (stateTimeBuilder_ == null) { + stateTime_ = null; + } else { + stateTime_ = null; + stateTimeBuilder_ = null; + } + creator_ = ""; + + internalGetMutableLabels().clear(); + if (runtimeConfigBuilder_ == null) { + runtimeConfig_ = null; + } else { + runtimeConfig_ = null; + runtimeConfigBuilder_ = null; + } + if (environmentConfigBuilder_ == null) { + environmentConfig_ = null; + } else { + environmentConfig_ = null; + environmentConfigBuilder_ = null; + } + operation_ = ""; + + if (stateHistoryBuilder_ == null) { + stateHistory_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + stateHistoryBuilder_.clear(); + } + batchConfigCase_ = 0; + batchConfig_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_Batch_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.Batch.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch build() { + com.google.cloud.dataproc.v1.Batch result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch buildPartial() { + com.google.cloud.dataproc.v1.Batch result = new com.google.cloud.dataproc.v1.Batch(this); + int from_bitField0_ = bitField0_; + result.name_ = name_; + result.uuid_ = uuid_; + if (createTimeBuilder_ == null) { + result.createTime_ = createTime_; + } else { + result.createTime_ = createTimeBuilder_.build(); + } + if (batchConfigCase_ == 4) { + if (pysparkBatchBuilder_ == null) { + result.batchConfig_ = batchConfig_; + } else { + result.batchConfig_ = pysparkBatchBuilder_.build(); + } + } + if (batchConfigCase_ == 5) { + if (sparkBatchBuilder_ == null) { + result.batchConfig_ = batchConfig_; + } else { + result.batchConfig_ = sparkBatchBuilder_.build(); + } + } + if (batchConfigCase_ == 6) { + if (sparkRBatchBuilder_ == null) { + result.batchConfig_ = batchConfig_; + } else { + result.batchConfig_ = sparkRBatchBuilder_.build(); + } + } + if (batchConfigCase_ == 7) { + if (sparkSqlBatchBuilder_ == null) { + result.batchConfig_ = batchConfig_; + } else { + result.batchConfig_ = sparkSqlBatchBuilder_.build(); + } + } + if (runtimeInfoBuilder_ == null) { + result.runtimeInfo_ = runtimeInfo_; + } else { + result.runtimeInfo_ = runtimeInfoBuilder_.build(); + } + result.state_ = state_; + result.stateMessage_ = stateMessage_; + if (stateTimeBuilder_ == null) { + result.stateTime_ = stateTime_; + } else { + result.stateTime_ = stateTimeBuilder_.build(); + } + result.creator_ = creator_; + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + if (runtimeConfigBuilder_ == null) { + result.runtimeConfig_ = runtimeConfig_; + } else { + result.runtimeConfig_ = runtimeConfigBuilder_.build(); + } + if (environmentConfigBuilder_ == null) { + result.environmentConfig_ = environmentConfig_; + } else { + result.environmentConfig_ = environmentConfigBuilder_.build(); + } + result.operation_ = operation_; + if (stateHistoryBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + stateHistory_ = java.util.Collections.unmodifiableList(stateHistory_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.stateHistory_ = stateHistory_; + } else { + result.stateHistory_ = stateHistoryBuilder_.build(); + } + result.batchConfigCase_ = batchConfigCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.Batch) { + return mergeFrom((com.google.cloud.dataproc.v1.Batch) other); + } else 
{ + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.Batch other) { + if (other == com.google.cloud.dataproc.v1.Batch.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (!other.getUuid().isEmpty()) { + uuid_ = other.uuid_; + onChanged(); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasRuntimeInfo()) { + mergeRuntimeInfo(other.getRuntimeInfo()); + } + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + if (!other.getStateMessage().isEmpty()) { + stateMessage_ = other.stateMessage_; + onChanged(); + } + if (other.hasStateTime()) { + mergeStateTime(other.getStateTime()); + } + if (!other.getCreator().isEmpty()) { + creator_ = other.creator_; + onChanged(); + } + internalGetMutableLabels().mergeFrom(other.internalGetLabels()); + if (other.hasRuntimeConfig()) { + mergeRuntimeConfig(other.getRuntimeConfig()); + } + if (other.hasEnvironmentConfig()) { + mergeEnvironmentConfig(other.getEnvironmentConfig()); + } + if (!other.getOperation().isEmpty()) { + operation_ = other.operation_; + onChanged(); + } + if (stateHistoryBuilder_ == null) { + if (!other.stateHistory_.isEmpty()) { + if (stateHistory_.isEmpty()) { + stateHistory_ = other.stateHistory_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureStateHistoryIsMutable(); + stateHistory_.addAll(other.stateHistory_); + } + onChanged(); + } + } else { + if (!other.stateHistory_.isEmpty()) { + if (stateHistoryBuilder_.isEmpty()) { + stateHistoryBuilder_.dispose(); + stateHistoryBuilder_ = null; + stateHistory_ = other.stateHistory_; + bitField0_ = (bitField0_ & ~0x00000002); + stateHistoryBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getStateHistoryFieldBuilder() + : null; + } else { + stateHistoryBuilder_.addAllMessages(other.stateHistory_); + } + } + } + switch (other.getBatchConfigCase()) { + case PYSPARK_BATCH: + { + mergePysparkBatch(other.getPysparkBatch()); + break; + } + case SPARK_BATCH: + { + mergeSparkBatch(other.getSparkBatch()); + break; + } + case SPARK_R_BATCH: + { + mergeSparkRBatch(other.getSparkRBatch()); + break; + } + case SPARK_SQL_BATCH: + { + mergeSparkSqlBatch(other.getSparkSqlBatch()); + break; + } + case BATCHCONFIG_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.Batch parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.Batch) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int batchConfigCase_ = 0; + private java.lang.Object batchConfig_; + + public BatchConfigCase getBatchConfigCase() { + return BatchConfigCase.forNumber(batchConfigCase_); + } + + public Builder clearBatchConfig() { + batchConfigCase_ = 0; + batchConfig_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Output only. The resource name of the batch.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Output only. The resource name of the batch.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Output only. The resource name of the batch.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The resource name of the batch.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The resource name of the batch.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private java.lang.Object uuid_ = ""; + /** + * + * + *
+     * Output only. A batch UUID (Universally Unique Identifier). The service
+     * generates this value when it creates the batch.
+     * 
+ * + * string uuid = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The uuid. + */ + public java.lang.String getUuid() { + java.lang.Object ref = uuid_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + uuid_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Output only. A batch UUID (Universally Unique Identifier). The service
+     * generates this value when it creates the batch.
+     * 
+ * + * string uuid = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for uuid. + */ + public com.google.protobuf.ByteString getUuidBytes() { + java.lang.Object ref = uuid_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + uuid_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Output only. A batch UUID (Universally Unique Identifier). The service
+     * generates this value when it creates the batch.
+     * 
+ * + * string uuid = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The uuid to set. + * @return This builder for chaining. + */ + public Builder setUuid(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + uuid_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. A batch UUID (Universally Unique Identifier). The service
+     * generates this value when it creates the batch.
+     * 
+ * + * string uuid = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearUuid() { + + uuid_ = getDefaultInstance().getUuid(); + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. A batch UUID (Universally Unique Identifier). The service
+     * generates this value when it creates the batch.
+     * 
+ * + * string uuid = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for uuid to set. + * @return This builder for chaining. + */ + public Builder setUuidBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + uuid_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + /** + * + * + *
+     * Output only. The time when the batch was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return createTimeBuilder_ != null || createTime_ != null; + } + /** + * + * + *
+     * Output only. The time when the batch was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Output only. The time when the batch was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + onChanged(); + } else { + createTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Output only. The time when the batch was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + onChanged(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Output only. The time when the batch was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (createTime_ != null) { + createTime_ = + com.google.protobuf.Timestamp.newBuilder(createTime_).mergeFrom(value).buildPartial(); + } else { + createTime_ = value; + } + onChanged(); + } else { + createTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Output only. The time when the batch was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + if (createTimeBuilder_ == null) { + createTime_ = null; + onChanged(); + } else { + createTime_ = null; + createTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Output only. The time when the batch was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + + onChanged(); + return getCreateTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. The time when the batch was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + /** + * + * + *
+     * Output only. The time when the batch was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.PySparkBatch, + com.google.cloud.dataproc.v1.PySparkBatch.Builder, + com.google.cloud.dataproc.v1.PySparkBatchOrBuilder> + pysparkBatchBuilder_; + /** + * + * + *
+     * Optional. PySpark batch config.
+     * 
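+     * <p>Illustrative build sketch for this {@code batch_config} oneof case (hand-written;
+     * the bucket URI is a made-up placeholder):
+     * <pre>{@code
+     * com.google.cloud.dataproc.v1.Batch batch =
+     *     com.google.cloud.dataproc.v1.Batch.newBuilder()
+     *         .setPysparkBatch(
+     *             com.google.cloud.dataproc.v1.PySparkBatch.newBuilder()
+     *                 .setMainPythonFileUri("gs://my-bucket/job.py") // assumed example URI
+     *                 .build())
+     *         .build();
+     * }</pre>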
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the pysparkBatch field is set. + */ + @java.lang.Override + public boolean hasPysparkBatch() { + return batchConfigCase_ == 4; + } + /** + * + * + *
+     * Optional. PySpark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The pysparkBatch. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.PySparkBatch getPysparkBatch() { + if (pysparkBatchBuilder_ == null) { + if (batchConfigCase_ == 4) { + return (com.google.cloud.dataproc.v1.PySparkBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.PySparkBatch.getDefaultInstance(); + } else { + if (batchConfigCase_ == 4) { + return pysparkBatchBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1.PySparkBatch.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. PySpark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPysparkBatch(com.google.cloud.dataproc.v1.PySparkBatch value) { + if (pysparkBatchBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + batchConfig_ = value; + onChanged(); + } else { + pysparkBatchBuilder_.setMessage(value); + } + batchConfigCase_ = 4; + return this; + } + /** + * + * + *
+     * Optional. PySpark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPysparkBatch( + com.google.cloud.dataproc.v1.PySparkBatch.Builder builderForValue) { + if (pysparkBatchBuilder_ == null) { + batchConfig_ = builderForValue.build(); + onChanged(); + } else { + pysparkBatchBuilder_.setMessage(builderForValue.build()); + } + batchConfigCase_ = 4; + return this; + } + /** + * + * + *
+     * Optional. PySpark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergePysparkBatch(com.google.cloud.dataproc.v1.PySparkBatch value) { + if (pysparkBatchBuilder_ == null) { + if (batchConfigCase_ == 4 + && batchConfig_ != com.google.cloud.dataproc.v1.PySparkBatch.getDefaultInstance()) { + batchConfig_ = + com.google.cloud.dataproc.v1.PySparkBatch.newBuilder( + (com.google.cloud.dataproc.v1.PySparkBatch) batchConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + batchConfig_ = value; + } + onChanged(); + } else { + if (batchConfigCase_ == 4) { + pysparkBatchBuilder_.mergeFrom(value); + } + pysparkBatchBuilder_.setMessage(value); + } + batchConfigCase_ = 4; + return this; + } + /** + * + * + *
+     * Optional. PySpark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearPysparkBatch() { + if (pysparkBatchBuilder_ == null) { + if (batchConfigCase_ == 4) { + batchConfigCase_ = 0; + batchConfig_ = null; + onChanged(); + } + } else { + if (batchConfigCase_ == 4) { + batchConfigCase_ = 0; + batchConfig_ = null; + } + pysparkBatchBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Optional. PySpark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.PySparkBatch.Builder getPysparkBatchBuilder() { + return getPysparkBatchFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. PySpark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.PySparkBatchOrBuilder getPysparkBatchOrBuilder() { + if ((batchConfigCase_ == 4) && (pysparkBatchBuilder_ != null)) { + return pysparkBatchBuilder_.getMessageOrBuilder(); + } else { + if (batchConfigCase_ == 4) { + return (com.google.cloud.dataproc.v1.PySparkBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.PySparkBatch.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. PySpark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.PySparkBatch, + com.google.cloud.dataproc.v1.PySparkBatch.Builder, + com.google.cloud.dataproc.v1.PySparkBatchOrBuilder> + getPysparkBatchFieldBuilder() { + if (pysparkBatchBuilder_ == null) { + if (!(batchConfigCase_ == 4)) { + batchConfig_ = com.google.cloud.dataproc.v1.PySparkBatch.getDefaultInstance(); + } + pysparkBatchBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.PySparkBatch, + com.google.cloud.dataproc.v1.PySparkBatch.Builder, + com.google.cloud.dataproc.v1.PySparkBatchOrBuilder>( + (com.google.cloud.dataproc.v1.PySparkBatch) batchConfig_, + getParentForChildren(), + isClean()); + batchConfig_ = null; + } + batchConfigCase_ = 4; + onChanged(); + ; + return pysparkBatchBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.SparkBatch, + com.google.cloud.dataproc.v1.SparkBatch.Builder, + com.google.cloud.dataproc.v1.SparkBatchOrBuilder> + sparkBatchBuilder_; + /** + * + * + *
+     * Optional. Spark batch config.
+     * 
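+     * <p>Sketch of the oneof semantics (hand-written): setting {@code spark_batch}
+     * replaces whichever {@code batch_config} case was set before:
+     * <pre>{@code
+     * com.google.cloud.dataproc.v1.Batch.Builder b =
+     *     com.google.cloud.dataproc.v1.Batch.newBuilder();
+     * b.setPysparkBatch(com.google.cloud.dataproc.v1.PySparkBatch.getDefaultInstance());
+     * b.setSparkBatch(com.google.cloud.dataproc.v1.SparkBatch.getDefaultInstance());
+     * boolean pyspark = b.hasPysparkBatch(); // false; only one case can be set at a time
+     * boolean spark = b.hasSparkBatch();     // true
+     * }</pre>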
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the sparkBatch field is set. + */ + @java.lang.Override + public boolean hasSparkBatch() { + return batchConfigCase_ == 5; + } + /** + * + * + *
+     * Optional. Spark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The sparkBatch. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkBatch getSparkBatch() { + if (sparkBatchBuilder_ == null) { + if (batchConfigCase_ == 5) { + return (com.google.cloud.dataproc.v1.SparkBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.SparkBatch.getDefaultInstance(); + } else { + if (batchConfigCase_ == 5) { + return sparkBatchBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1.SparkBatch.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. Spark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSparkBatch(com.google.cloud.dataproc.v1.SparkBatch value) { + if (sparkBatchBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + batchConfig_ = value; + onChanged(); + } else { + sparkBatchBuilder_.setMessage(value); + } + batchConfigCase_ = 5; + return this; + } + /** + * + * + *
+     * Optional. Spark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSparkBatch(com.google.cloud.dataproc.v1.SparkBatch.Builder builderForValue) { + if (sparkBatchBuilder_ == null) { + batchConfig_ = builderForValue.build(); + onChanged(); + } else { + sparkBatchBuilder_.setMessage(builderForValue.build()); + } + batchConfigCase_ = 5; + return this; + } + /** + * + * + *
+     * Optional. Spark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeSparkBatch(com.google.cloud.dataproc.v1.SparkBatch value) { + if (sparkBatchBuilder_ == null) { + if (batchConfigCase_ == 5 + && batchConfig_ != com.google.cloud.dataproc.v1.SparkBatch.getDefaultInstance()) { + batchConfig_ = + com.google.cloud.dataproc.v1.SparkBatch.newBuilder( + (com.google.cloud.dataproc.v1.SparkBatch) batchConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + batchConfig_ = value; + } + onChanged(); + } else { + if (batchConfigCase_ == 5) { + sparkBatchBuilder_.mergeFrom(value); + } + sparkBatchBuilder_.setMessage(value); + } + batchConfigCase_ = 5; + return this; + } + /** + * + * + *
+     * Optional. Spark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearSparkBatch() { + if (sparkBatchBuilder_ == null) { + if (batchConfigCase_ == 5) { + batchConfigCase_ = 0; + batchConfig_ = null; + onChanged(); + } + } else { + if (batchConfigCase_ == 5) { + batchConfigCase_ = 0; + batchConfig_ = null; + } + sparkBatchBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Optional. Spark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.SparkBatch.Builder getSparkBatchBuilder() { + return getSparkBatchFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Spark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkBatchOrBuilder getSparkBatchOrBuilder() { + if ((batchConfigCase_ == 5) && (sparkBatchBuilder_ != null)) { + return sparkBatchBuilder_.getMessageOrBuilder(); + } else { + if (batchConfigCase_ == 5) { + return (com.google.cloud.dataproc.v1.SparkBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.SparkBatch.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. Spark batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.SparkBatch, + com.google.cloud.dataproc.v1.SparkBatch.Builder, + com.google.cloud.dataproc.v1.SparkBatchOrBuilder> + getSparkBatchFieldBuilder() { + if (sparkBatchBuilder_ == null) { + if (!(batchConfigCase_ == 5)) { + batchConfig_ = com.google.cloud.dataproc.v1.SparkBatch.getDefaultInstance(); + } + sparkBatchBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.SparkBatch, + com.google.cloud.dataproc.v1.SparkBatch.Builder, + com.google.cloud.dataproc.v1.SparkBatchOrBuilder>( + (com.google.cloud.dataproc.v1.SparkBatch) batchConfig_, + getParentForChildren(), + isClean()); + batchConfig_ = null; + } + batchConfigCase_ = 5; + onChanged(); + ; + return sparkBatchBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.SparkRBatch, + com.google.cloud.dataproc.v1.SparkRBatch.Builder, + com.google.cloud.dataproc.v1.SparkRBatchOrBuilder> + sparkRBatchBuilder_; + /** + * + * + *
+     * Optional. SparkR batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the sparkRBatch field is set. + */ + @java.lang.Override + public boolean hasSparkRBatch() { + return batchConfigCase_ == 6; + } + /** + * + * + *
+     * Optional. SparkR batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The sparkRBatch. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkRBatch getSparkRBatch() { + if (sparkRBatchBuilder_ == null) { + if (batchConfigCase_ == 6) { + return (com.google.cloud.dataproc.v1.SparkRBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.SparkRBatch.getDefaultInstance(); + } else { + if (batchConfigCase_ == 6) { + return sparkRBatchBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1.SparkRBatch.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. SparkR batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSparkRBatch(com.google.cloud.dataproc.v1.SparkRBatch value) { + if (sparkRBatchBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + batchConfig_ = value; + onChanged(); + } else { + sparkRBatchBuilder_.setMessage(value); + } + batchConfigCase_ = 6; + return this; + } + /** + * + * + *
+     * Optional. SparkR batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSparkRBatch( + com.google.cloud.dataproc.v1.SparkRBatch.Builder builderForValue) { + if (sparkRBatchBuilder_ == null) { + batchConfig_ = builderForValue.build(); + onChanged(); + } else { + sparkRBatchBuilder_.setMessage(builderForValue.build()); + } + batchConfigCase_ = 6; + return this; + } + /** + * + * + *
+     * Optional. SparkR batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeSparkRBatch(com.google.cloud.dataproc.v1.SparkRBatch value) { + if (sparkRBatchBuilder_ == null) { + if (batchConfigCase_ == 6 + && batchConfig_ != com.google.cloud.dataproc.v1.SparkRBatch.getDefaultInstance()) { + batchConfig_ = + com.google.cloud.dataproc.v1.SparkRBatch.newBuilder( + (com.google.cloud.dataproc.v1.SparkRBatch) batchConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + batchConfig_ = value; + } + onChanged(); + } else { + if (batchConfigCase_ == 6) { + sparkRBatchBuilder_.mergeFrom(value); + } + sparkRBatchBuilder_.setMessage(value); + } + batchConfigCase_ = 6; + return this; + } + /** + * + * + *
+     * Optional. SparkR batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearSparkRBatch() { + if (sparkRBatchBuilder_ == null) { + if (batchConfigCase_ == 6) { + batchConfigCase_ = 0; + batchConfig_ = null; + onChanged(); + } + } else { + if (batchConfigCase_ == 6) { + batchConfigCase_ = 0; + batchConfig_ = null; + } + sparkRBatchBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Optional. SparkR batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.SparkRBatch.Builder getSparkRBatchBuilder() { + return getSparkRBatchFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. SparkR batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkRBatchOrBuilder getSparkRBatchOrBuilder() { + if ((batchConfigCase_ == 6) && (sparkRBatchBuilder_ != null)) { + return sparkRBatchBuilder_.getMessageOrBuilder(); + } else { + if (batchConfigCase_ == 6) { + return (com.google.cloud.dataproc.v1.SparkRBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.SparkRBatch.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. SparkR batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.SparkRBatch, + com.google.cloud.dataproc.v1.SparkRBatch.Builder, + com.google.cloud.dataproc.v1.SparkRBatchOrBuilder> + getSparkRBatchFieldBuilder() { + if (sparkRBatchBuilder_ == null) { + if (!(batchConfigCase_ == 6)) { + batchConfig_ = com.google.cloud.dataproc.v1.SparkRBatch.getDefaultInstance(); + } + sparkRBatchBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.SparkRBatch, + com.google.cloud.dataproc.v1.SparkRBatch.Builder, + com.google.cloud.dataproc.v1.SparkRBatchOrBuilder>( + (com.google.cloud.dataproc.v1.SparkRBatch) batchConfig_, + getParentForChildren(), + isClean()); + batchConfig_ = null; + } + batchConfigCase_ = 6; + onChanged(); + ; + return sparkRBatchBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.SparkSqlBatch, + com.google.cloud.dataproc.v1.SparkSqlBatch.Builder, + com.google.cloud.dataproc.v1.SparkSqlBatchOrBuilder> + sparkSqlBatchBuilder_; + /** + * + * + *
+     * Optional. SparkSql batch config.
+     * 
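+     * <p>Hand-written sketch for dispatching on the oneof case; {@code getBatchConfigCase()}
+     * is the standard generated oneof accessor, and {@code getQueryFileUri()} is assumed
+     * from the SparkSqlBatch message in this release:
+     * <pre>{@code
+     * switch (batch.getBatchConfigCase()) {
+     *   case SPARK_SQL_BATCH:
+     *     String queryUri = batch.getSparkSqlBatch().getQueryFileUri();
+     *     break;
+     *   case BATCHCONFIG_NOT_SET:
+     *   default:
+     *     break;
+     * }
+     * }</pre>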
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the sparkSqlBatch field is set. + */ + @java.lang.Override + public boolean hasSparkSqlBatch() { + return batchConfigCase_ == 7; + } + /** + * + * + *
+     * Optional. SparkSql batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The sparkSqlBatch. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkSqlBatch getSparkSqlBatch() { + if (sparkSqlBatchBuilder_ == null) { + if (batchConfigCase_ == 7) { + return (com.google.cloud.dataproc.v1.SparkSqlBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.SparkSqlBatch.getDefaultInstance(); + } else { + if (batchConfigCase_ == 7) { + return sparkSqlBatchBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1.SparkSqlBatch.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. SparkSql batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSparkSqlBatch(com.google.cloud.dataproc.v1.SparkSqlBatch value) { + if (sparkSqlBatchBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + batchConfig_ = value; + onChanged(); + } else { + sparkSqlBatchBuilder_.setMessage(value); + } + batchConfigCase_ = 7; + return this; + } + /** + * + * + *
+     * Optional. SparkSql batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSparkSqlBatch( + com.google.cloud.dataproc.v1.SparkSqlBatch.Builder builderForValue) { + if (sparkSqlBatchBuilder_ == null) { + batchConfig_ = builderForValue.build(); + onChanged(); + } else { + sparkSqlBatchBuilder_.setMessage(builderForValue.build()); + } + batchConfigCase_ = 7; + return this; + } + /** + * + * + *
+     * Optional. SparkSql batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeSparkSqlBatch(com.google.cloud.dataproc.v1.SparkSqlBatch value) { + if (sparkSqlBatchBuilder_ == null) { + if (batchConfigCase_ == 7 + && batchConfig_ != com.google.cloud.dataproc.v1.SparkSqlBatch.getDefaultInstance()) { + batchConfig_ = + com.google.cloud.dataproc.v1.SparkSqlBatch.newBuilder( + (com.google.cloud.dataproc.v1.SparkSqlBatch) batchConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + batchConfig_ = value; + } + onChanged(); + } else { + if (batchConfigCase_ == 7) { + sparkSqlBatchBuilder_.mergeFrom(value); + } + sparkSqlBatchBuilder_.setMessage(value); + } + batchConfigCase_ = 7; + return this; + } + /** + * + * + *
+     * Optional. SparkSql batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearSparkSqlBatch() { + if (sparkSqlBatchBuilder_ == null) { + if (batchConfigCase_ == 7) { + batchConfigCase_ = 0; + batchConfig_ = null; + onChanged(); + } + } else { + if (batchConfigCase_ == 7) { + batchConfigCase_ = 0; + batchConfig_ = null; + } + sparkSqlBatchBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Optional. SparkSql batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.SparkSqlBatch.Builder getSparkSqlBatchBuilder() { + return getSparkSqlBatchFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. SparkSql batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkSqlBatchOrBuilder getSparkSqlBatchOrBuilder() { + if ((batchConfigCase_ == 7) && (sparkSqlBatchBuilder_ != null)) { + return sparkSqlBatchBuilder_.getMessageOrBuilder(); + } else { + if (batchConfigCase_ == 7) { + return (com.google.cloud.dataproc.v1.SparkSqlBatch) batchConfig_; + } + return com.google.cloud.dataproc.v1.SparkSqlBatch.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. SparkSql batch config.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.SparkSqlBatch, + com.google.cloud.dataproc.v1.SparkSqlBatch.Builder, + com.google.cloud.dataproc.v1.SparkSqlBatchOrBuilder> + getSparkSqlBatchFieldBuilder() { + if (sparkSqlBatchBuilder_ == null) { + if (!(batchConfigCase_ == 7)) { + batchConfig_ = com.google.cloud.dataproc.v1.SparkSqlBatch.getDefaultInstance(); + } + sparkSqlBatchBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.SparkSqlBatch, + com.google.cloud.dataproc.v1.SparkSqlBatch.Builder, + com.google.cloud.dataproc.v1.SparkSqlBatchOrBuilder>( + (com.google.cloud.dataproc.v1.SparkSqlBatch) batchConfig_, + getParentForChildren(), + isClean()); + batchConfig_ = null; + } + batchConfigCase_ = 7; + onChanged(); + ; + return sparkSqlBatchBuilder_; + } + + private com.google.cloud.dataproc.v1.RuntimeInfo runtimeInfo_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.RuntimeInfo, + com.google.cloud.dataproc.v1.RuntimeInfo.Builder, + com.google.cloud.dataproc.v1.RuntimeInfoOrBuilder> + runtimeInfoBuilder_; + /** + * + * + *
+     * Output only. Runtime information about batch execution.
+     * 
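+     * <p>Read sketch (hand-written); it assumes {@code RuntimeInfo} exposes the driver
+     * output location as {@code getOutputUri()} in this release:
+     * <pre>{@code
+     * if (batch.hasRuntimeInfo()) {
+     *   String outputUri = batch.getRuntimeInfo().getOutputUri();
+     * }
+     * }</pre>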
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the runtimeInfo field is set. + */ + public boolean hasRuntimeInfo() { + return runtimeInfoBuilder_ != null || runtimeInfo_ != null; + } + /** + * + * + *
+     * Output only. Runtime information about batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The runtimeInfo. + */ + public com.google.cloud.dataproc.v1.RuntimeInfo getRuntimeInfo() { + if (runtimeInfoBuilder_ == null) { + return runtimeInfo_ == null + ? com.google.cloud.dataproc.v1.RuntimeInfo.getDefaultInstance() + : runtimeInfo_; + } else { + return runtimeInfoBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Output only. Runtime information about batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setRuntimeInfo(com.google.cloud.dataproc.v1.RuntimeInfo value) { + if (runtimeInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + runtimeInfo_ = value; + onChanged(); + } else { + runtimeInfoBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Output only. Runtime information about batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setRuntimeInfo( + com.google.cloud.dataproc.v1.RuntimeInfo.Builder builderForValue) { + if (runtimeInfoBuilder_ == null) { + runtimeInfo_ = builderForValue.build(); + onChanged(); + } else { + runtimeInfoBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Output only. Runtime information about batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeRuntimeInfo(com.google.cloud.dataproc.v1.RuntimeInfo value) { + if (runtimeInfoBuilder_ == null) { + if (runtimeInfo_ != null) { + runtimeInfo_ = + com.google.cloud.dataproc.v1.RuntimeInfo.newBuilder(runtimeInfo_) + .mergeFrom(value) + .buildPartial(); + } else { + runtimeInfo_ = value; + } + onChanged(); + } else { + runtimeInfoBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Output only. Runtime information about batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearRuntimeInfo() { + if (runtimeInfoBuilder_ == null) { + runtimeInfo_ = null; + onChanged(); + } else { + runtimeInfo_ = null; + runtimeInfoBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Output only. Runtime information about batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.dataproc.v1.RuntimeInfo.Builder getRuntimeInfoBuilder() { + + onChanged(); + return getRuntimeInfoFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. Runtime information about batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.dataproc.v1.RuntimeInfoOrBuilder getRuntimeInfoOrBuilder() { + if (runtimeInfoBuilder_ != null) { + return runtimeInfoBuilder_.getMessageOrBuilder(); + } else { + return runtimeInfo_ == null + ? com.google.cloud.dataproc.v1.RuntimeInfo.getDefaultInstance() + : runtimeInfo_; + } + } + /** + * + * + *
+     * Output only. Runtime information about batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.RuntimeInfo, + com.google.cloud.dataproc.v1.RuntimeInfo.Builder, + com.google.cloud.dataproc.v1.RuntimeInfoOrBuilder> + getRuntimeInfoFieldBuilder() { + if (runtimeInfoBuilder_ == null) { + runtimeInfoBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.RuntimeInfo, + com.google.cloud.dataproc.v1.RuntimeInfo.Builder, + com.google.cloud.dataproc.v1.RuntimeInfoOrBuilder>( + getRuntimeInfo(), getParentForChildren(), isClean()); + runtimeInfo_ = null; + } + return runtimeInfoBuilder_; + } + + private int state_ = 0; + /** + * + * + *
+     * Output only. The state of the batch.
+     * 
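+     * <p>Hand-written polling sketch; {@code FAILED} is documented below for
+     * {@code state_message}, and the other terminal states are assumed from the
+     * {@code Batch.State} enum in this release:
+     * <pre>{@code
+     * com.google.cloud.dataproc.v1.Batch.State s = batch.getState();
+     * boolean terminal =
+     *     s == com.google.cloud.dataproc.v1.Batch.State.SUCCEEDED
+     *         || s == com.google.cloud.dataproc.v1.Batch.State.FAILED
+     *         || s == com.google.cloud.dataproc.v1.Batch.State.CANCELLED;
+     * }</pre>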
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + /** + * + * + *
+     * Output only. The state of the batch.
+     * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for state to set. + * @return This builder for chaining. + */ + public Builder setStateValue(int value) { + + state_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The state of the batch.
+     * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch.State getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1.Batch.State result = + com.google.cloud.dataproc.v1.Batch.State.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1.Batch.State.UNRECOGNIZED : result; + } + /** + * + * + *
+     * Output only. The state of the batch.
+     * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The state to set. + * @return This builder for chaining. + */ + public Builder setState(com.google.cloud.dataproc.v1.Batch.State value) { + if (value == null) { + throw new NullPointerException(); + } + + state_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The state of the batch.
+     * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearState() { + + state_ = 0; + onChanged(); + return this; + } + + private java.lang.Object stateMessage_ = ""; + /** + * + * + *
+     * Output only. Batch state details, such as a failure
+     * description if the state is `FAILED`.
+     * 
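+     * <p>Hand-written sketch pairing this message with the state check described above:
+     * <pre>{@code
+     * if (batch.getState() == com.google.cloud.dataproc.v1.Batch.State.FAILED) {
+     *   System.err.println("Batch failed: " + batch.getStateMessage());
+     * }
+     * }</pre>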
+ * + * string state_message = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The stateMessage. + */ + public java.lang.String getStateMessage() { + java.lang.Object ref = stateMessage_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + stateMessage_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Output only. Batch state details, such as a failure
+     * description if the state is `FAILED`.
+     * 
+ * + * string state_message = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for stateMessage. + */ + public com.google.protobuf.ByteString getStateMessageBytes() { + java.lang.Object ref = stateMessage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + stateMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Output only. Batch state details, such as a failure
+     * description if the state is `FAILED`.
+     * 
+ * + * string state_message = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The stateMessage to set. + * @return This builder for chaining. + */ + public Builder setStateMessage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + stateMessage_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. Batch state details, such as a failure
+     * description if the state is `FAILED`.
+     * 
+ * + * string state_message = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearStateMessage() { + + stateMessage_ = getDefaultInstance().getStateMessage(); + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. Batch state details, such as a failure
+     * description if the state is `FAILED`.
+     * 
+ * + * string state_message = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for stateMessage to set. + * @return This builder for chaining. + */ + public Builder setStateMessageBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + stateMessage_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp stateTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + stateTimeBuilder_; + /** + * + * + *
+     * Output only. The time when the batch entered its current state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the stateTime field is set. + */ + public boolean hasStateTime() { + return stateTimeBuilder_ != null || stateTime_ != null; + } + /** + * + * + *
+     * Output only. The time when the batch entered its current state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The stateTime. + */ + public com.google.protobuf.Timestamp getStateTime() { + if (stateTimeBuilder_ == null) { + return stateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : stateTime_; + } else { + return stateTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Output only. The time when the batch entered its current state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStateTime(com.google.protobuf.Timestamp value) { + if (stateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + stateTime_ = value; + onChanged(); + } else { + stateTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Output only. The time when the batch entered its current state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (stateTimeBuilder_ == null) { + stateTime_ = builderForValue.build(); + onChanged(); + } else { + stateTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Output only. The time when the batch entered its current state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeStateTime(com.google.protobuf.Timestamp value) { + if (stateTimeBuilder_ == null) { + if (stateTime_ != null) { + stateTime_ = + com.google.protobuf.Timestamp.newBuilder(stateTime_).mergeFrom(value).buildPartial(); + } else { + stateTime_ = value; + } + onChanged(); + } else { + stateTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Output only. The time when the batch entered its current state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearStateTime() { + if (stateTimeBuilder_ == null) { + stateTime_ = null; + onChanged(); + } else { + stateTime_ = null; + stateTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Output only. The time when the batch entered its current state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getStateTimeBuilder() { + + onChanged(); + return getStateTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. The time when the batch entered its current state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getStateTimeOrBuilder() { + if (stateTimeBuilder_ != null) { + return stateTimeBuilder_.getMessageOrBuilder(); + } else { + return stateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : stateTime_; + } + } + /** + * + * + *
+     * Output only. The time when the batch entered its current state.
+     * 
+ * + * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getStateTimeFieldBuilder() { + if (stateTimeBuilder_ == null) { + stateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getStateTime(), getParentForChildren(), isClean()); + stateTime_ = null; + } + return stateTimeBuilder_; + } + + private java.lang.Object creator_ = ""; + /** + * + * + *
+     * Output only. The email address of the user who created the batch.
+     * 
+ * + * string creator = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The creator. + */ + public java.lang.String getCreator() { + java.lang.Object ref = creator_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + creator_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Output only. The email address of the user who created the batch.
+     * 
+ * + * string creator = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for creator. + */ + public com.google.protobuf.ByteString getCreatorBytes() { + java.lang.Object ref = creator_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + creator_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Output only. The email address of the user who created the batch.
+     * 
+ * + * string creator = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The creator to set. + * @return This builder for chaining. + */ + public Builder setCreator(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + creator_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The email address of the user who created the batch.
+     * 
+ * + * string creator = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearCreator() { + + creator_ = getDefaultInstance().getCreator(); + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The email address of the user who created the batch.
+     * 
+ * + * string creator = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for creator to set. + * @return This builder for chaining. + */ + public Builder setCreatorBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + creator_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + private com.google.protobuf.MapField + internalGetMutableLabels() { + onChanged(); + ; + if (labels_ == null) { + labels_ = com.google.protobuf.MapField.newMapField(LabelsDefaultEntryHolder.defaultEntry); + } + if (!labels_.isMutable()) { + labels_ = labels_.copy(); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + /** + * + * + *
+     * Optional. The labels to associate with this batch.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC
+     * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+     * associated with a batch.
+     * 
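+     * <p>Hand-written sketch of the map mutators; the key and value literals are
+     * made-up examples that satisfy the RFC 1035 rules above:
+     * <pre>{@code
+     * com.google.cloud.dataproc.v1.Batch.Builder b =
+     *     com.google.cloud.dataproc.v1.Batch.newBuilder();
+     * b.putLabels("env", "dev");
+     * b.putLabels("team", ""); // empty values are allowed
+     * int count = b.getLabelsCount(); // 2
+     * }</pre>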
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + return internalGetLabels().getMap().containsKey(key); + } + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + /** + * + * + *
+     * Optional. The labels to associate with this batch.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC
+     * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+     * associated with a batch.
+     * 
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + * + * + *
+     * Optional. The labels to associate with this batch.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC
+     * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+     * associated with a batch.
+     * 
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.lang.String getLabelsOrDefault( + java.lang.String key, java.lang.String defaultValue) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+     * Optional. The labels to associate with this batch.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC
+     * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+     * associated with a batch.
+     * 
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + internalGetMutableLabels().getMutableMap().clear(); + return this; + } + /** + * + * + *
+     * Optional. The labels to associate with this batch.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC
+     * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+     * associated with a batch.
+     * 
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder removeLabels(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + internalGetMutableLabels().getMutableMap().remove(key); + return this; + } + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableLabels() { + return internalGetMutableLabels().getMutableMap(); + } + /** + * + * + *
+     * Optional. The labels to associate with this batch.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC
+     * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+     * associated with a batch.
+     * 
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder putLabels(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + if (value == null) { + throw new java.lang.NullPointerException(); + } + internalGetMutableLabels().getMutableMap().put(key, value); + return this; + } + /** + * + * + *
+     * Optional. The labels to associate with this batch.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC
+     * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+     * associated with a batch.
+     * 
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder putAllLabels(java.util.Map values) { + internalGetMutableLabels().getMutableMap().putAll(values); + return this; + } + + private com.google.cloud.dataproc.v1.RuntimeConfig runtimeConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.RuntimeConfig, + com.google.cloud.dataproc.v1.RuntimeConfig.Builder, + com.google.cloud.dataproc.v1.RuntimeConfigOrBuilder> + runtimeConfigBuilder_; + /** + * + * + *
+     * Optional. Runtime configuration for the batch execution.
+     * 
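+     * <p>Build sketch (hand-written), given a {@code Batch.Builder b}; {@code putProperties}
+     * assumes RuntimeConfig carries a string-to-string properties map in this release, and
+     * the Spark property shown is only an example:
+     * <pre>{@code
+     * b.setRuntimeConfig(
+     *     com.google.cloud.dataproc.v1.RuntimeConfig.newBuilder()
+     *         .putProperties("spark.executor.cores", "4")
+     *         .build());
+     * }</pre>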
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the runtimeConfig field is set. + */ + public boolean hasRuntimeConfig() { + return runtimeConfigBuilder_ != null || runtimeConfig_ != null; + } + /** + * + * + *
+     * Optional. Runtime configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The runtimeConfig. + */ + public com.google.cloud.dataproc.v1.RuntimeConfig getRuntimeConfig() { + if (runtimeConfigBuilder_ == null) { + return runtimeConfig_ == null + ? com.google.cloud.dataproc.v1.RuntimeConfig.getDefaultInstance() + : runtimeConfig_; + } else { + return runtimeConfigBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. Runtime configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRuntimeConfig(com.google.cloud.dataproc.v1.RuntimeConfig value) { + if (runtimeConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + runtimeConfig_ = value; + onChanged(); + } else { + runtimeConfigBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Runtime configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRuntimeConfig( + com.google.cloud.dataproc.v1.RuntimeConfig.Builder builderForValue) { + if (runtimeConfigBuilder_ == null) { + runtimeConfig_ = builderForValue.build(); + onChanged(); + } else { + runtimeConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. Runtime configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeRuntimeConfig(com.google.cloud.dataproc.v1.RuntimeConfig value) { + if (runtimeConfigBuilder_ == null) { + if (runtimeConfig_ != null) { + runtimeConfig_ = + com.google.cloud.dataproc.v1.RuntimeConfig.newBuilder(runtimeConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + runtimeConfig_ = value; + } + onChanged(); + } else { + runtimeConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Runtime configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRuntimeConfig() { + if (runtimeConfigBuilder_ == null) { + runtimeConfig_ = null; + onChanged(); + } else { + runtimeConfig_ = null; + runtimeConfigBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. Runtime configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.RuntimeConfig.Builder getRuntimeConfigBuilder() { + + onChanged(); + return getRuntimeConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Runtime configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.RuntimeConfigOrBuilder getRuntimeConfigOrBuilder() { + if (runtimeConfigBuilder_ != null) { + return runtimeConfigBuilder_.getMessageOrBuilder(); + } else { + return runtimeConfig_ == null + ? com.google.cloud.dataproc.v1.RuntimeConfig.getDefaultInstance() + : runtimeConfig_; + } + } + /** + * + * + *
+     * Optional. Runtime configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.RuntimeConfig, + com.google.cloud.dataproc.v1.RuntimeConfig.Builder, + com.google.cloud.dataproc.v1.RuntimeConfigOrBuilder> + getRuntimeConfigFieldBuilder() { + if (runtimeConfigBuilder_ == null) { + runtimeConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.RuntimeConfig, + com.google.cloud.dataproc.v1.RuntimeConfig.Builder, + com.google.cloud.dataproc.v1.RuntimeConfigOrBuilder>( + getRuntimeConfig(), getParentForChildren(), isClean()); + runtimeConfig_ = null; + } + return runtimeConfigBuilder_; + } + + private com.google.cloud.dataproc.v1.EnvironmentConfig environmentConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.EnvironmentConfig, + com.google.cloud.dataproc.v1.EnvironmentConfig.Builder, + com.google.cloud.dataproc.v1.EnvironmentConfigOrBuilder> + environmentConfigBuilder_; + /** + * + * + *
+     * Optional. Environment configuration for the batch execution.
+     * 
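+     * <p>Build sketch (hand-written), given a {@code Batch.Builder b}; the nested
+     * {@code ExecutionConfig} and its service-account setter are assumed from the
+     * shared config messages in this release:
+     * <pre>{@code
+     * b.setEnvironmentConfig(
+     *     com.google.cloud.dataproc.v1.EnvironmentConfig.newBuilder()
+     *         .setExecutionConfig(
+     *             com.google.cloud.dataproc.v1.ExecutionConfig.newBuilder()
+     *                 .setServiceAccount("sa@my-project.iam.gserviceaccount.com"))
+     *         .build());
+     * }</pre>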
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the environmentConfig field is set. + */ + public boolean hasEnvironmentConfig() { + return environmentConfigBuilder_ != null || environmentConfig_ != null; + } + /** + * + * + *
+     * Optional. Environment configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The environmentConfig. + */ + public com.google.cloud.dataproc.v1.EnvironmentConfig getEnvironmentConfig() { + if (environmentConfigBuilder_ == null) { + return environmentConfig_ == null + ? com.google.cloud.dataproc.v1.EnvironmentConfig.getDefaultInstance() + : environmentConfig_; + } else { + return environmentConfigBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. Environment configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEnvironmentConfig(com.google.cloud.dataproc.v1.EnvironmentConfig value) { + if (environmentConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + environmentConfig_ = value; + onChanged(); + } else { + environmentConfigBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Environment configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEnvironmentConfig( + com.google.cloud.dataproc.v1.EnvironmentConfig.Builder builderForValue) { + if (environmentConfigBuilder_ == null) { + environmentConfig_ = builderForValue.build(); + onChanged(); + } else { + environmentConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. Environment configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeEnvironmentConfig(com.google.cloud.dataproc.v1.EnvironmentConfig value) { + if (environmentConfigBuilder_ == null) { + if (environmentConfig_ != null) { + environmentConfig_ = + com.google.cloud.dataproc.v1.EnvironmentConfig.newBuilder(environmentConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + environmentConfig_ = value; + } + onChanged(); + } else { + environmentConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Environment configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearEnvironmentConfig() { + if (environmentConfigBuilder_ == null) { + environmentConfig_ = null; + onChanged(); + } else { + environmentConfig_ = null; + environmentConfigBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. Environment configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.EnvironmentConfig.Builder getEnvironmentConfigBuilder() { + + onChanged(); + return getEnvironmentConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Environment configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.EnvironmentConfigOrBuilder getEnvironmentConfigOrBuilder() { + if (environmentConfigBuilder_ != null) { + return environmentConfigBuilder_.getMessageOrBuilder(); + } else { + return environmentConfig_ == null + ? com.google.cloud.dataproc.v1.EnvironmentConfig.getDefaultInstance() + : environmentConfig_; + } + } + /** + * + * + *
+     * Optional. Environment configuration for the batch execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.EnvironmentConfig, + com.google.cloud.dataproc.v1.EnvironmentConfig.Builder, + com.google.cloud.dataproc.v1.EnvironmentConfigOrBuilder> + getEnvironmentConfigFieldBuilder() { + if (environmentConfigBuilder_ == null) { + environmentConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.EnvironmentConfig, + com.google.cloud.dataproc.v1.EnvironmentConfig.Builder, + com.google.cloud.dataproc.v1.EnvironmentConfigOrBuilder>( + getEnvironmentConfig(), getParentForChildren(), isClean()); + environmentConfig_ = null; + } + return environmentConfigBuilder_; + } + + private java.lang.Object operation_ = ""; + /** + * + * + *
+     * Output only. The resource name of the operation associated with this batch.
+     * 
+ * + * string operation = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The operation. + */ + public java.lang.String getOperation() { + java.lang.Object ref = operation_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operation_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Output only. The resource name of the operation associated with this batch.
+     * 
+ * + * string operation = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for operation. + */ + public com.google.protobuf.ByteString getOperationBytes() { + java.lang.Object ref = operation_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operation_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Output only. The resource name of the operation associated with this batch.
+     * 
+ * + * string operation = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The operation to set. + * @return This builder for chaining. + */ + public Builder setOperation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + operation_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The resource name of the operation associated with this batch.
+     * 
+ * + * string operation = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearOperation() { + + operation_ = getDefaultInstance().getOperation(); + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The resource name of the operation associated with this batch.
+     * 
+     *
+     * string operation = 16 [(.google.api.field_behavior) = OUTPUT_ONLY];
+     *
+     * @param value The bytes for operation to set.
+     * @return This builder for chaining.
+     */
+    public Builder setOperationBytes(com.google.protobuf.ByteString value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      checkByteStringIsUtf8(value);
+
+      operation_ = value;
+      onChanged();
+      return this;
+    }
+
+    private java.util.List<com.google.cloud.dataproc.v1.Batch.StateHistory> stateHistory_ =
+        java.util.Collections.emptyList();
+
+    private void ensureStateHistoryIsMutable() {
+      if (!((bitField0_ & 0x00000002) != 0)) {
+        stateHistory_ =
+            new java.util.ArrayList<com.google.cloud.dataproc.v1.Batch.StateHistory>(stateHistory_);
+        bitField0_ |= 0x00000002;
+      }
+    }
+
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+            com.google.cloud.dataproc.v1.Batch.StateHistory,
+            com.google.cloud.dataproc.v1.Batch.StateHistory.Builder,
+            com.google.cloud.dataproc.v1.Batch.StateHistoryOrBuilder>
+        stateHistoryBuilder_;
+
+    /**
+     *
+     *
+     * Output only. Historical state information for the batch.
+     * 
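+     *
+     * For illustration only (hand-written note, not generated documentation): a
+     * minimal sketch of reading the history, assuming a {@code Batch} returned by the
+     * service.
+     * <pre>{@code
+     * // Hypothetical usage; "batch" is a placeholder instance.
+     * for (Batch.StateHistory entry : batch.getStateHistoryList()) {
+     *   System.out.println(entry.getState() + ": " + entry.getStateMessage());
+     * }
+     * }</pre>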
+     *
+     *
+     * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY];
+     *
+     */
+    public java.util.List<com.google.cloud.dataproc.v1.Batch.StateHistory> getStateHistoryList() {
+      if (stateHistoryBuilder_ == null) {
+        return java.util.Collections.unmodifiableList(stateHistory_);
+      } else {
+        return stateHistoryBuilder_.getMessageList();
+      }
+    }
+    /**
+     *
+     *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getStateHistoryCount() { + if (stateHistoryBuilder_ == null) { + return stateHistory_.size(); + } else { + return stateHistoryBuilder_.getCount(); + } + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.dataproc.v1.Batch.StateHistory getStateHistory(int index) { + if (stateHistoryBuilder_ == null) { + return stateHistory_.get(index); + } else { + return stateHistoryBuilder_.getMessage(index); + } + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStateHistory( + int index, com.google.cloud.dataproc.v1.Batch.StateHistory value) { + if (stateHistoryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStateHistoryIsMutable(); + stateHistory_.set(index, value); + onChanged(); + } else { + stateHistoryBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStateHistory( + int index, com.google.cloud.dataproc.v1.Batch.StateHistory.Builder builderForValue) { + if (stateHistoryBuilder_ == null) { + ensureStateHistoryIsMutable(); + stateHistory_.set(index, builderForValue.build()); + onChanged(); + } else { + stateHistoryBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStateHistory(com.google.cloud.dataproc.v1.Batch.StateHistory value) { + if (stateHistoryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStateHistoryIsMutable(); + stateHistory_.add(value); + onChanged(); + } else { + stateHistoryBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStateHistory( + int index, com.google.cloud.dataproc.v1.Batch.StateHistory value) { + if (stateHistoryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStateHistoryIsMutable(); + stateHistory_.add(index, value); + onChanged(); + } else { + stateHistoryBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStateHistory( + com.google.cloud.dataproc.v1.Batch.StateHistory.Builder builderForValue) { + if (stateHistoryBuilder_ == null) { + ensureStateHistoryIsMutable(); + stateHistory_.add(builderForValue.build()); + onChanged(); + } else { + stateHistoryBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStateHistory( + int index, com.google.cloud.dataproc.v1.Batch.StateHistory.Builder builderForValue) { + if (stateHistoryBuilder_ == null) { + ensureStateHistoryIsMutable(); + stateHistory_.add(index, builderForValue.build()); + onChanged(); + } else { + stateHistoryBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+     *
+     *
+     * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY];
+     *
+     */
+    public Builder addAllStateHistory(
+        java.lang.Iterable<? extends com.google.cloud.dataproc.v1.Batch.StateHistory> values) {
+      if (stateHistoryBuilder_ == null) {
+        ensureStateHistoryIsMutable();
+        com.google.protobuf.AbstractMessageLite.Builder.addAll(values, stateHistory_);
+        onChanged();
+      } else {
+        stateHistoryBuilder_.addAllMessages(values);
+      }
+      return this;
+    }
+    /**
+     *
+     *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearStateHistory() { + if (stateHistoryBuilder_ == null) { + stateHistory_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + stateHistoryBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeStateHistory(int index) { + if (stateHistoryBuilder_ == null) { + ensureStateHistoryIsMutable(); + stateHistory_.remove(index); + onChanged(); + } else { + stateHistoryBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.dataproc.v1.Batch.StateHistory.Builder getStateHistoryBuilder( + int index) { + return getStateHistoryFieldBuilder().getBuilder(index); + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.dataproc.v1.Batch.StateHistoryOrBuilder getStateHistoryOrBuilder( + int index) { + if (stateHistoryBuilder_ == null) { + return stateHistory_.get(index); + } else { + return stateHistoryBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+     *
+     *
+     * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY];
+     *
+     */
+    public java.util.List<? extends com.google.cloud.dataproc.v1.Batch.StateHistoryOrBuilder>
+        getStateHistoryOrBuilderList() {
+      if (stateHistoryBuilder_ != null) {
+        return stateHistoryBuilder_.getMessageOrBuilderList();
+      } else {
+        return java.util.Collections.unmodifiableList(stateHistory_);
+      }
+    }
+    /**
+     *
+     *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.dataproc.v1.Batch.StateHistory.Builder addStateHistoryBuilder() { + return getStateHistoryFieldBuilder() + .addBuilder(com.google.cloud.dataproc.v1.Batch.StateHistory.getDefaultInstance()); + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.dataproc.v1.Batch.StateHistory.Builder addStateHistoryBuilder( + int index) { + return getStateHistoryFieldBuilder() + .addBuilder(index, com.google.cloud.dataproc.v1.Batch.StateHistory.getDefaultInstance()); + } + /** + * + * + *
+     * Output only. Historical state information for the batch.
+     * 
+     *
+     *
+     * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY];
+     *
+     */
+    public java.util.List<com.google.cloud.dataproc.v1.Batch.StateHistory.Builder>
+        getStateHistoryBuilderList() {
+      return getStateHistoryFieldBuilder().getBuilderList();
+    }
+
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+            com.google.cloud.dataproc.v1.Batch.StateHistory,
+            com.google.cloud.dataproc.v1.Batch.StateHistory.Builder,
+            com.google.cloud.dataproc.v1.Batch.StateHistoryOrBuilder>
+        getStateHistoryFieldBuilder() {
+      if (stateHistoryBuilder_ == null) {
+        stateHistoryBuilder_ =
+            new com.google.protobuf.RepeatedFieldBuilderV3<
+                com.google.cloud.dataproc.v1.Batch.StateHistory,
+                com.google.cloud.dataproc.v1.Batch.StateHistory.Builder,
+                com.google.cloud.dataproc.v1.Batch.StateHistoryOrBuilder>(
+                stateHistory_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean());
+        stateHistory_ = null;
+      }
+      return stateHistoryBuilder_;
+    }
+
+    @java.lang.Override
+    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.setUnknownFields(unknownFields);
+    }
+
+    @java.lang.Override
+    public final Builder mergeUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.mergeUnknownFields(unknownFields);
+    }
+
+    // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.Batch)
+  }
+
+  // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.Batch)
+  private static final com.google.cloud.dataproc.v1.Batch DEFAULT_INSTANCE;
+
+  static {
+    DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.Batch();
+  }
+
+  public static com.google.cloud.dataproc.v1.Batch getDefaultInstance() {
+    return DEFAULT_INSTANCE;
+  }
+
+  private static final com.google.protobuf.Parser<Batch> PARSER =
+      new com.google.protobuf.AbstractParser<Batch>() {
+        @java.lang.Override
+        public Batch parsePartialFrom(
+            com.google.protobuf.CodedInputStream input,
+            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+            throws com.google.protobuf.InvalidProtocolBufferException {
+          return new Batch(input, extensionRegistry);
+        }
+      };
+
+  public static com.google.protobuf.Parser<Batch> parser() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.protobuf.Parser<Batch> getParserForType() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.cloud.dataproc.v1.Batch getDefaultInstanceForType() {
+    return DEFAULT_INSTANCE;
+  }
+}
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchName.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchName.java
new file mode 100644
index 00000000..cc26cad6
--- /dev/null
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchName.java
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.dataproc.v1;
+
+import com.google.api.pathtemplate.PathTemplate;
+import com.google.api.resourcenames.ResourceName;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import javax.annotation.Generated;
+
+// AUTO-GENERATED DOCUMENTATION AND CLASS.
+@Generated("by gapic-generator-java")
+public class BatchName implements ResourceName {
+  private static final PathTemplate PROJECT_LOCATION_BATCH =
+      PathTemplate.createWithoutUrlEncoding(
+          "projects/{project}/locations/{location}/batches/{batch}");
+  private volatile Map<String, String> fieldValuesMap;
+  private final String project;
+  private final String location;
+  private final String batch;
+
+  @Deprecated
+  protected BatchName() {
+    project = null;
+    location = null;
+    batch = null;
+  }
+
+  private BatchName(Builder builder) {
+    project = Preconditions.checkNotNull(builder.getProject());
+    location = Preconditions.checkNotNull(builder.getLocation());
+    batch = Preconditions.checkNotNull(builder.getBatch());
+  }
+
+  public String getProject() {
+    return project;
+  }
+
+  public String getLocation() {
+    return location;
+  }
+
+  public String getBatch() {
+    return batch;
+  }
+
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  public Builder toBuilder() {
+    return new Builder(this);
+  }
+
+  public static BatchName of(String project, String location, String batch) {
+    return newBuilder().setProject(project).setLocation(location).setBatch(batch).build();
+  }
+
+  public static String format(String project, String location, String batch) {
+    return newBuilder()
+        .setProject(project)
+        .setLocation(location)
+        .setBatch(batch)
+        .build()
+        .toString();
+  }
+
+  public static BatchName parse(String formattedString) {
+    if (formattedString.isEmpty()) {
+      return null;
+    }
+    Map<String, String> matchMap =
+        PROJECT_LOCATION_BATCH.validatedMatch(
+            formattedString, "BatchName.parse: formattedString not in valid format");
+    return of(matchMap.get("project"), matchMap.get("location"), matchMap.get("batch"));
+  }
+
+  public static List<BatchName> parseList(List<String> formattedStrings) {
+    List<BatchName> list = new ArrayList<>(formattedStrings.size());
+    for (String formattedString : formattedStrings) {
+      list.add(parse(formattedString));
+    }
+    return list;
+  }
+
+  public static List<String> toStringList(List<BatchName> values) {
+    List<String> list = new ArrayList<>(values.size());
+    for (BatchName value : values) {
+      if (value == null) {
+        list.add("");
+      } else {
+        list.add(value.toString());
+      }
+    }
+    return list;
+  }
+
+  public static boolean isParsableFrom(String formattedString) {
+    return PROJECT_LOCATION_BATCH.matches(formattedString);
+  }
+
+  @Override
+  public Map<String, String> getFieldValuesMap() {
+    if (fieldValuesMap == null) {
+      synchronized (this) {
+        if (fieldValuesMap == null) {
+          ImmutableMap.Builder<String, String> fieldMapBuilder = ImmutableMap.builder();
+          if (project != null) {
+            fieldMapBuilder.put("project", project);
+          }
+          if (location != null) {
+            fieldMapBuilder.put("location", location);
+          }
+          if (batch != null) {
+            fieldMapBuilder.put("batch", batch);
+          }
+          fieldValuesMap = fieldMapBuilder.build();
+        }
+      }
+    }
+    return fieldValuesMap;
+  }
+
+  public String getFieldValue(String fieldName) {
+    return getFieldValuesMap().get(fieldName);
+  }
+
+  @Override
+  public String toString() {
+    return PROJECT_LOCATION_BATCH.instantiate(
+        "project", project, "location", location, "batch", batch);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o == this) {
+      return true;
+    }
+    if (o != null && getClass() == o.getClass()) {
+      BatchName that = ((BatchName) o);
+      return Objects.equals(this.project, that.project)
+          && Objects.equals(this.location, that.location)
+          && Objects.equals(this.batch, that.batch);
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    int h = 1;
+    h *= 1000003;
+    h ^= Objects.hashCode(project);
+    h *= 1000003;
+    h ^= Objects.hashCode(location);
+    h *= 1000003;
+    h ^= Objects.hashCode(batch);
+    return h;
+  }
+
+  /** Builder for projects/{project}/locations/{location}/batches/{batch}. */
+  public static class Builder {
+    private String project;
+    private String location;
+    private String batch;
+
+    protected Builder() {}
+
+    public String getProject() {
+      return project;
+    }
+
+    public String getLocation() {
+      return location;
+    }
+
+    public String getBatch() {
+      return batch;
+    }
+
+    public Builder setProject(String project) {
+      this.project = project;
+      return this;
+    }
+
+    public Builder setLocation(String location) {
+      this.location = location;
+      return this;
+    }
+
+    public Builder setBatch(String batch) {
+      this.batch = batch;
+      return this;
+    }
+
+    private Builder(BatchName batchName) {
+      this.project = batchName.project;
+      this.location = batchName.location;
+      this.batch = batchName.batch;
+    }
+
+    public BatchName build() {
+      return new BatchName(this);
+    }
+  }
+}
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchOrBuilder.java
new file mode 100644
index 00000000..c8c898ce
--- /dev/null
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchOrBuilder.java
@@ -0,0 +1,694 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1/batches.proto
+
+package com.google.cloud.dataproc.v1;
+
+public interface BatchOrBuilder
+    extends
+    // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.Batch)
+    com.google.protobuf.MessageOrBuilder {
+
+  /**
+   *
+   *
+   * Output only. The resource name of the batch.
+   * 
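+   *
+   * For illustration only (hand-written note, not generated documentation): the name
+   * follows {@code projects/{project}/locations/{location}/batches/{batch}} and can be
+   * handled with the {@code BatchName} helper. A minimal sketch:
+   * <pre>{@code
+   * // Hypothetical usage; identifiers are placeholders.
+   * String name = BatchName.format("my-project", "us-central1", "my-batch");
+   * BatchName parsed = BatchName.parse(name);
+   * }</pre>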
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Output only. The resource name of the batch.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Output only. A batch UUID (Unique Universal Identifier). The service
+   * generates this value when it creates the batch.
+   * 
+ * + * string uuid = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The uuid. + */ + java.lang.String getUuid(); + /** + * + * + *
+   * Output only. A batch UUID (Unique Universal Identifier). The service
+   * generates this value when it creates the batch.
+   * 
+ * + * string uuid = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for uuid. + */ + com.google.protobuf.ByteString getUuidBytes(); + + /** + * + * + *
+   * Output only. The time when the batch was created.
+   * 
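+   *
+   * For illustration only (hand-written note, not generated documentation): a minimal
+   * sketch of formatting the timestamp, assuming the optional protobuf-java-util
+   * dependency is on the classpath.
+   * <pre>{@code
+   * // Hypothetical usage; "batch" is a placeholder instance.
+   * if (batch.hasCreateTime()) {
+   *   String created = com.google.protobuf.util.Timestamps.toString(batch.getCreateTime());
+   * }
+   * }</pre>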
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + /** + * + * + *
+   * Output only. The time when the batch was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + /** + * + * + *
+   * Output only. The time when the batch was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Optional. PySpark batch config.
+   * 
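+   *
+   * For illustration only (hand-written note, not generated documentation): the batch
+   * config is a oneof, so setting a PySpark config clears any other config. A minimal
+   * sketch, with a placeholder GCS URI:
+   * <pre>{@code
+   * // Hypothetical usage; the file URI is a placeholder.
+   * Batch batch =
+   *     Batch.newBuilder()
+   *         .setPysparkBatch(
+   *             PySparkBatch.newBuilder().setMainPythonFileUri("gs://my-bucket/job.py"))
+   *         .build();
+   * }</pre>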
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the pysparkBatch field is set. + */ + boolean hasPysparkBatch(); + /** + * + * + *
+   * Optional. PySpark batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The pysparkBatch. + */ + com.google.cloud.dataproc.v1.PySparkBatch getPysparkBatch(); + /** + * + * + *
+   * Optional. PySpark batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.PySparkBatch pyspark_batch = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.PySparkBatchOrBuilder getPysparkBatchOrBuilder(); + + /** + * + * + *
+   * Optional. Spark batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the sparkBatch field is set. + */ + boolean hasSparkBatch(); + /** + * + * + *
+   * Optional. Spark batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The sparkBatch. + */ + com.google.cloud.dataproc.v1.SparkBatch getSparkBatch(); + /** + * + * + *
+   * Optional. Spark batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkBatch spark_batch = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.SparkBatchOrBuilder getSparkBatchOrBuilder(); + + /** + * + * + *
+   * Optional. SparkR batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the sparkRBatch field is set. + */ + boolean hasSparkRBatch(); + /** + * + * + *
+   * Optional. SparkR batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The sparkRBatch. + */ + com.google.cloud.dataproc.v1.SparkRBatch getSparkRBatch(); + /** + * + * + *
+   * Optional. SparkR batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkRBatch spark_r_batch = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.SparkRBatchOrBuilder getSparkRBatchOrBuilder(); + + /** + * + * + *
+   * Optional. SparkSql batch config.
+   * 
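+   *
+   * For illustration only (hand-written note, not generated documentation): a minimal
+   * sketch of a SQL config, with placeholder URI and variable values.
+   * <pre>{@code
+   * // Hypothetical usage; values are placeholders.
+   * SparkSqlBatch sql =
+   *     SparkSqlBatch.newBuilder()
+   *         .setQueryFileUri("gs://my-bucket/query.sql")
+   *         .putQueryVariables("env", "dev")
+   *         .build();
+   * }</pre>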
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the sparkSqlBatch field is set. + */ + boolean hasSparkSqlBatch(); + /** + * + * + *
+   * Optional. SparkSql batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The sparkSqlBatch. + */ + com.google.cloud.dataproc.v1.SparkSqlBatch getSparkSqlBatch(); + /** + * + * + *
+   * Optional. SparkSql batch config.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlBatch spark_sql_batch = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.SparkSqlBatchOrBuilder getSparkSqlBatchOrBuilder(); + + /** + * + * + *
+   * Output only. Runtime information about batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the runtimeInfo field is set. + */ + boolean hasRuntimeInfo(); + /** + * + * + *
+   * Output only. Runtime information about batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The runtimeInfo. + */ + com.google.cloud.dataproc.v1.RuntimeInfo getRuntimeInfo(); + /** + * + * + *
+   * Output only. Runtime information about batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeInfo runtime_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.dataproc.v1.RuntimeInfoOrBuilder getRuntimeInfoOrBuilder(); + + /** + * + * + *
+   * Output only. The state of the batch.
+   * 
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + int getStateValue(); + /** + * + * + *
+   * Output only. The state of the batch.
+   * 
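+   *
+   * For illustration only (hand-written note, not generated documentation): a minimal
+   * sketch of checking for a terminal state.
+   * <pre>{@code
+   * // Hypothetical usage; "batch" is a placeholder instance.
+   * Batch.State state = batch.getState();
+   * boolean done =
+   *     state == Batch.State.SUCCEEDED
+   *         || state == Batch.State.FAILED
+   *         || state == Batch.State.CANCELLED;
+   * }</pre>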
+ * + * + * .google.cloud.dataproc.v1.Batch.State state = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + com.google.cloud.dataproc.v1.Batch.State getState(); + + /** + * + * + *
+   * Output only. Batch state details, such as a failure
+   * description if the state is `FAILED`.
+   * 
+ * + * string state_message = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The stateMessage. + */ + java.lang.String getStateMessage(); + /** + * + * + *
+   * Output only. Batch state details, such as a failure
+   * description if the state is `FAILED`.
+   * 
+ * + * string state_message = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for stateMessage. + */ + com.google.protobuf.ByteString getStateMessageBytes(); + + /** + * + * + *
+   * Output only. The time when the batch entered a current state.
+   * 
+ * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the stateTime field is set. + */ + boolean hasStateTime(); + /** + * + * + *
+   * Output only. The time when the batch entered a current state.
+   * 
+ * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The stateTime. + */ + com.google.protobuf.Timestamp getStateTime(); + /** + * + * + *
+   * Output only. The time when the batch entered a current state.
+   * 
+ * + * .google.protobuf.Timestamp state_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getStateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. The email address of the user who created the batch.
+   * 
+ * + * string creator = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The creator. + */ + java.lang.String getCreator(); + /** + * + * + *
+   * Output only. The email address of the user who created the batch.
+   * 
+ * + * string creator = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for creator. + */ + com.google.protobuf.ByteString getCreatorBytes(); + + /** + * + * + *
+   * Optional. The labels to associate with this batch.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC
+   * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+   * associated with a batch.
+   * 
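+   *
+   * For illustration only (hand-written note, not generated documentation): a minimal
+   * sketch of reading a label with a fallback, using a placeholder key.
+   * <pre>{@code
+   * // Hypothetical usage; "batch" is a placeholder instance.
+   * String team = batch.getLabelsOrDefault("team", "unknown");
+   * }</pre>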
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + int getLabelsCount(); + /** + * + * + *
+   * Optional. The labels to associate with this batch.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC
+   * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+   * associated with a batch.
+   * 
+   *
+   * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  boolean containsLabels(java.lang.String key);
+  /** Use {@link #getLabelsMap()} instead. */
+  @java.lang.Deprecated
+  java.util.Map<java.lang.String, java.lang.String> getLabels();
+  /**
+   *
+   *
+   * Optional. The labels to associate with this batch.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC
+   * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+   * associated with a batch.
+   * 
+   *
+   * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  java.util.Map<java.lang.String, java.lang.String> getLabelsMap();
+  /**
+   *
+   *
+   * Optional. The labels to associate with this batch.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC
+   * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+   * associated with a batch.
+   * 
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + java.lang.String getLabelsOrDefault(java.lang.String key, java.lang.String defaultValue); + /** + * + * + *
+   * Optional. The labels to associate with this batch.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC
+   * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+   * associated with a batch.
+   * 
+ * + * map<string, string> labels = 13 [(.google.api.field_behavior) = OPTIONAL]; + */ + java.lang.String getLabelsOrThrow(java.lang.String key); + + /** + * + * + *
+   * Optional. Runtime configuration for the batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the runtimeConfig field is set. + */ + boolean hasRuntimeConfig(); + /** + * + * + *
+   * Optional. Runtime configuration for the batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The runtimeConfig. + */ + com.google.cloud.dataproc.v1.RuntimeConfig getRuntimeConfig(); + /** + * + * + *
+   * Optional. Runtime configuration for the batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.RuntimeConfig runtime_config = 14 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.RuntimeConfigOrBuilder getRuntimeConfigOrBuilder(); + + /** + * + * + *
+   * Optional. Environment configuration for the batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the environmentConfig field is set. + */ + boolean hasEnvironmentConfig(); + /** + * + * + *
+   * Optional. Environment configuration for the batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The environmentConfig. + */ + com.google.cloud.dataproc.v1.EnvironmentConfig getEnvironmentConfig(); + /** + * + * + *
+   * Optional. Environment configuration for the batch execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.EnvironmentConfig environment_config = 15 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.EnvironmentConfigOrBuilder getEnvironmentConfigOrBuilder(); + + /** + * + * + *
+   * Output only. The resource name of the operation associated with this batch.
+   * 
+ * + * string operation = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The operation. + */ + java.lang.String getOperation(); + /** + * + * + *
+   * Output only. The resource name of the operation associated with this batch.
+   * 
+ * + * string operation = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for operation. + */ + com.google.protobuf.ByteString getOperationBytes(); + + /** + * + * + *
+   * Output only. Historical state information for the batch.
+   * 
+   *
+   *
+   * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY];
+   *
+   */
+  java.util.List<com.google.cloud.dataproc.v1.Batch.StateHistory> getStateHistoryList();
+  /**
+   *
+   *
+   * Output only. Historical state information for the batch.
+   * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.dataproc.v1.Batch.StateHistory getStateHistory(int index); + /** + * + * + *
+   * Output only. Historical state information for the batch.
+   * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getStateHistoryCount(); + /** + * + * + *
+   * Output only. Historical state information for the batch.
+   * 
+   *
+   *
+   * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY];
+   *
+   */
+  java.util.List<? extends com.google.cloud.dataproc.v1.Batch.StateHistoryOrBuilder>
+      getStateHistoryOrBuilderList();
+  /**
+   *
+   *
+   * Output only. Historical state information for the batch.
+   * 
+ * + * + * repeated .google.cloud.dataproc.v1.Batch.StateHistory state_history = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.dataproc.v1.Batch.StateHistoryOrBuilder getStateHistoryOrBuilder(int index); + + public com.google.cloud.dataproc.v1.Batch.BatchConfigCase getBatchConfigCase(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchesProto.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchesProto.java new file mode 100644 index 00000000..aea24497 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BatchesProto.java @@ -0,0 +1,357 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +public final class BatchesProto { + private BatchesProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_CreateBatchRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_CreateBatchRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_GetBatchRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_GetBatchRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_ListBatchesRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_ListBatchesRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_ListBatchesResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_ListBatchesResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_DeleteBatchRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_DeleteBatchRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_Batch_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_Batch_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_cloud_dataproc_v1_Batch_StateHistory_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_Batch_StateHistory_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_Batch_LabelsEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_Batch_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_PySparkBatch_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_PySparkBatch_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_SparkBatch_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_SparkBatch_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_SparkRBatch_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_SparkRBatch_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_SparkSqlBatch_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_SparkSqlBatch_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_SparkSqlBatch_QueryVariablesEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_SparkSqlBatch_QueryVariablesEntry_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n&google/cloud/dataproc/v1/batches.proto" + + "\022\030google.cloud.dataproc.v1\032\034google/api/a" + + "nnotations.proto\032\027google/api/client.prot" + + "o\032\037google/api/field_behavior.proto\032\031goog" + + "le/api/resource.proto\032%google/cloud/data" + + "proc/v1/shared.proto\032#google/longrunning" + + "/operations.proto\032\033google/protobuf/empty" + + ".proto\032\037google/protobuf/timestamp.proto\"" + + "\260\001\n\022CreateBatchRequest\0225\n\006parent\030\001 \001(\tB%" + + "\340A\002\372A\037\022\035dataproc.googleapis.com/Batch\0223\n" + + "\005batch\030\002 \001(\0132\037.google.cloud.dataproc.v1." 
+ + "BatchB\003\340A\002\022\025\n\010batch_id\030\003 \001(\tB\003\340A\001\022\027\n\nreq" + + "uest_id\030\004 \001(\tB\003\340A\001\"F\n\017GetBatchRequest\0223\n" + + "\004name\030\001 \001(\tB%\340A\002\372A\037\n\035dataproc.googleapis" + + ".com/Batch\"|\n\022ListBatchesRequest\0225\n\006pare" + + "nt\030\001 \001(\tB%\340A\002\372A\037\022\035dataproc.googleapis.co" + + "m/Batch\022\026\n\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\npage_" + + "token\030\003 \001(\tB\003\340A\001\"`\n\023ListBatchesResponse\022" + + "0\n\007batches\030\001 \003(\0132\037.google.cloud.dataproc" + + ".v1.Batch\022\027\n\017next_page_token\030\002 \001(\t\"I\n\022De" + + "leteBatchRequest\0223\n\004name\030\001 \001(\tB%\340A\002\372A\037\n\035" + + "dataproc.googleapis.com/Batch\"\310\n\n\005Batch\022" + + "\021\n\004name\030\001 \001(\tB\003\340A\003\022\021\n\004uuid\030\002 \001(\tB\003\340A\003\0224\n" + + "\013create_time\030\003 \001(\0132\032.google.protobuf.Tim" + + "estampB\003\340A\003\022D\n\rpyspark_batch\030\004 \001(\0132&.goo" + + "gle.cloud.dataproc.v1.PySparkBatchB\003\340A\001H" + + "\000\022@\n\013spark_batch\030\005 \001(\0132$.google.cloud.da" + + "taproc.v1.SparkBatchB\003\340A\001H\000\022C\n\rspark_r_b" + + "atch\030\006 \001(\0132%.google.cloud.dataproc.v1.Sp" + + "arkRBatchB\003\340A\001H\000\022G\n\017spark_sql_batch\030\007 \001(" + + "\0132\'.google.cloud.dataproc.v1.SparkSqlBat" + + "chB\003\340A\001H\000\022@\n\014runtime_info\030\010 \001(\0132%.google" + + ".cloud.dataproc.v1.RuntimeInfoB\003\340A\003\0229\n\005s" + + "tate\030\t \001(\0162%.google.cloud.dataproc.v1.Ba" + + "tch.StateB\003\340A\003\022\032\n\rstate_message\030\n \001(\tB\003\340" + + "A\003\0223\n\nstate_time\030\013 \001(\0132\032.google.protobuf" + + ".TimestampB\003\340A\003\022\024\n\007creator\030\014 \001(\tB\003\340A\003\022@\n" + + "\006labels\030\r \003(\0132+.google.cloud.dataproc.v1" + + ".Batch.LabelsEntryB\003\340A\001\022D\n\016runtime_confi" + + "g\030\016 \001(\0132\'.google.cloud.dataproc.v1.Runti" + + "meConfigB\003\340A\001\022L\n\022environment_config\030\017 \001(" + + "\0132+.google.cloud.dataproc.v1.Environment" + + "ConfigB\003\340A\001\022\026\n\toperation\030\020 \001(\tB\003\340A\003\022H\n\rs" + + "tate_history\030\021 \003(\0132,.google.cloud.datapr" + + "oc.v1.Batch.StateHistoryB\003\340A\003\032\240\001\n\014StateH" + + "istory\0229\n\005state\030\001 \001(\0162%.google.cloud.dat" + + "aproc.v1.Batch.StateB\003\340A\003\022\032\n\rstate_messa" + + "ge\030\002 \001(\tB\003\340A\003\0229\n\020state_start_time\030\003 \001(\0132" + + "\032.google.protobuf.TimestampB\003\340A\003\032-\n\013Labe" + + "lsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"" + + "r\n\005State\022\025\n\021STATE_UNSPECIFIED\020\000\022\013\n\007PENDI" + + "NG\020\001\022\013\n\007RUNNING\020\002\022\016\n\nCANCELLING\020\003\022\r\n\tCAN" + + "CELLED\020\004\022\r\n\tSUCCEEDED\020\005\022\n\n\006FAILED\020\006:[\352AX" + + "\n\035dataproc.googleapis.com/Batch\0227project" + + "s/{project}/locations/{location}/batches" + + "/{batch}B\016\n\014batch_config\"\262\001\n\014PySparkBatc" + + "h\022!\n\024main_python_file_uri\030\001 \001(\tB\003\340A\002\022\021\n\004" + + "args\030\002 \003(\tB\003\340A\001\022\035\n\020python_file_uris\030\003 \003(" + + "\tB\003\340A\001\022\032\n\rjar_file_uris\030\004 \003(\tB\003\340A\001\022\026\n\tfi" + + "le_uris\030\005 \003(\tB\003\340A\001\022\031\n\014archive_uris\030\006 \003(\t" + + 
"B\003\340A\001\"\265\001\n\nSparkBatch\022 \n\021main_jar_file_ur" + + "i\030\001 \001(\tB\003\340A\001H\000\022\031\n\nmain_class\030\002 \001(\tB\003\340A\001H" + + "\000\022\021\n\004args\030\003 \003(\tB\003\340A\001\022\032\n\rjar_file_uris\030\004 " + + "\003(\tB\003\340A\001\022\026\n\tfile_uris\030\005 \003(\tB\003\340A\001\022\031\n\014arch" + + "ive_uris\030\006 \003(\tB\003\340A\001B\010\n\006driver\"q\n\013SparkRB" + + "atch\022\034\n\017main_r_file_uri\030\001 \001(\tB\003\340A\002\022\021\n\004ar" + + "gs\030\002 \003(\tB\003\340A\001\022\026\n\tfile_uris\030\003 \003(\tB\003\340A\001\022\031\n" + + "\014archive_uris\030\004 \003(\tB\003\340A\001\"\332\001\n\rSparkSqlBat" + + "ch\022\033\n\016query_file_uri\030\001 \001(\tB\003\340A\002\022Y\n\017query" + + "_variables\030\002 \003(\0132;.google.cloud.dataproc" + + ".v1.SparkSqlBatch.QueryVariablesEntryB\003\340" + + "A\001\022\032\n\rjar_file_uris\030\003 \003(\tB\003\340A\001\0325\n\023QueryV" + + "ariablesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(" + + "\t:\0028\0012\235\006\n\017BatchController\022\352\001\n\013CreateBatc" + + "h\022,.google.cloud.dataproc.v1.CreateBatch" + + "Request\032\035.google.longrunning.Operation\"\215" + + "\001\202\323\344\223\0024\"+/v1/{parent=projects/*/location" + + "s/*}/batches:\005batch\332A\025parent,batch,batch" + + "_id\312A8\n\005Batch\022/google.cloud.dataproc.v1." + + "BatchOperationMetadata\022\222\001\n\010GetBatch\022).go" + + "ogle.cloud.dataproc.v1.GetBatchRequest\032\037" + + ".google.cloud.dataproc.v1.Batch\":\202\323\344\223\002-\022" + + "+/v1/{name=projects/*/locations/*/batche" + + "s/*}\332A\004name\022\250\001\n\013ListBatches\022,.google.clo" + + "ud.dataproc.v1.ListBatchesRequest\032-.goog" + + "le.cloud.dataproc.v1.ListBatchesResponse" + + "\"<\202\323\344\223\002-\022+/v1/{parent=projects/*/locatio" + + "ns/*}/batches\332A\006parent\022\217\001\n\013DeleteBatch\022," + + ".google.cloud.dataproc.v1.DeleteBatchReq" + + "uest\032\026.google.protobuf.Empty\":\202\323\344\223\002-*+/v" + + "1/{name=projects/*/locations/*/batches/*" + + "}\332A\004name\032K\312A\027dataproc.googleapis.com\322A.h" + + "ttps://www.googleapis.com/auth/cloud-pla" + + "tformBp\n\034com.google.cloud.dataproc.v1B\014B" + + "atchesProtoP\001Z@google.golang.org/genprot" + + "o/googleapis/cloud/dataproc/v1;dataprocb" + + "\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.cloud.dataproc.v1.SharedProto.getDescriptor(), + com.google.longrunning.OperationsProto.getDescriptor(), + com.google.protobuf.EmptyProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }); + internal_static_google_cloud_dataproc_v1_CreateBatchRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_dataproc_v1_CreateBatchRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_CreateBatchRequest_descriptor, + new java.lang.String[] { + "Parent", "Batch", "BatchId", "RequestId", + }); + internal_static_google_cloud_dataproc_v1_GetBatchRequest_descriptor = + getDescriptor().getMessageTypes().get(1); + 
internal_static_google_cloud_dataproc_v1_GetBatchRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_GetBatchRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_dataproc_v1_ListBatchesRequest_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_dataproc_v1_ListBatchesRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_ListBatchesRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", + }); + internal_static_google_cloud_dataproc_v1_ListBatchesResponse_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_dataproc_v1_ListBatchesResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_ListBatchesResponse_descriptor, + new java.lang.String[] { + "Batches", "NextPageToken", + }); + internal_static_google_cloud_dataproc_v1_DeleteBatchRequest_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_dataproc_v1_DeleteBatchRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_DeleteBatchRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_dataproc_v1_Batch_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_dataproc_v1_Batch_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_Batch_descriptor, + new java.lang.String[] { + "Name", + "Uuid", + "CreateTime", + "PysparkBatch", + "SparkBatch", + "SparkRBatch", + "SparkSqlBatch", + "RuntimeInfo", + "State", + "StateMessage", + "StateTime", + "Creator", + "Labels", + "RuntimeConfig", + "EnvironmentConfig", + "Operation", + "StateHistory", + "BatchConfig", + }); + internal_static_google_cloud_dataproc_v1_Batch_StateHistory_descriptor = + internal_static_google_cloud_dataproc_v1_Batch_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1_Batch_StateHistory_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_Batch_StateHistory_descriptor, + new java.lang.String[] { + "State", "StateMessage", "StateStartTime", + }); + internal_static_google_cloud_dataproc_v1_Batch_LabelsEntry_descriptor = + internal_static_google_cloud_dataproc_v1_Batch_descriptor.getNestedTypes().get(1); + internal_static_google_cloud_dataproc_v1_Batch_LabelsEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_Batch_LabelsEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_dataproc_v1_PySparkBatch_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_dataproc_v1_PySparkBatch_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_PySparkBatch_descriptor, + new java.lang.String[] { + "MainPythonFileUri", + "Args", + "PythonFileUris", + "JarFileUris", + "FileUris", + "ArchiveUris", + }); + internal_static_google_cloud_dataproc_v1_SparkBatch_descriptor = + getDescriptor().getMessageTypes().get(7); + 
internal_static_google_cloud_dataproc_v1_SparkBatch_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_SparkBatch_descriptor, + new java.lang.String[] { + "MainJarFileUri", + "MainClass", + "Args", + "JarFileUris", + "FileUris", + "ArchiveUris", + "Driver", + }); + internal_static_google_cloud_dataproc_v1_SparkRBatch_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_google_cloud_dataproc_v1_SparkRBatch_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_SparkRBatch_descriptor, + new java.lang.String[] { + "MainRFileUri", "Args", "FileUris", "ArchiveUris", + }); + internal_static_google_cloud_dataproc_v1_SparkSqlBatch_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_google_cloud_dataproc_v1_SparkSqlBatch_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_SparkSqlBatch_descriptor, + new java.lang.String[] { + "QueryFileUri", "QueryVariables", "JarFileUris", + }); + internal_static_google_cloud_dataproc_v1_SparkSqlBatch_QueryVariablesEntry_descriptor = + internal_static_google_cloud_dataproc_v1_SparkSqlBatch_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1_SparkSqlBatch_QueryVariablesEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_SparkSqlBatch_QueryVariablesEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.AnnotationsProto.http); + registry.add(com.google.api.ClientProto.methodSignature); + registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resource); + registry.add(com.google.api.ResourceProto.resourceReference); + registry.add(com.google.longrunning.OperationsProto.operationInfo); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.cloud.dataproc.v1.SharedProto.getDescriptor(); + com.google.longrunning.OperationsProto.getDescriptor(); + com.google.protobuf.EmptyProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateBatchRequest.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateBatchRequest.java new file mode 100644 index 00000000..29c5ec47 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateBatchRequest.java @@ -0,0 +1,1364 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A request to create a batch workload.
+ * 
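+ *
+ * <p>A minimal construction sketch, added for illustration only (it is not part of
+ * the generated file; the project, region, and batch ID below are placeholder
+ * values):
+ *
+ * <pre>{@code
+ * // Placeholder resource names; substitute a real project and region.
+ * CreateBatchRequest request =
+ *     CreateBatchRequest.newBuilder()
+ *         .setParent("projects/my-project/locations/us-central1")
+ *         .setBatch(Batch.newBuilder().build())
+ *         .setBatchId("example-batch-0001")
+ *         .build();
+ * }</pre>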
+ * + * Protobuf type {@code google.cloud.dataproc.v1.CreateBatchRequest} + */ +public final class CreateBatchRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.CreateBatchRequest) + CreateBatchRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use CreateBatchRequest.newBuilder() to construct. + private CreateBatchRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CreateBatchRequest() { + parent_ = ""; + batchId_ = ""; + requestId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CreateBatchRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private CreateBatchRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + parent_ = s; + break; + } + case 18: + { + com.google.cloud.dataproc.v1.Batch.Builder subBuilder = null; + if (batch_ != null) { + subBuilder = batch_.toBuilder(); + } + batch_ = + input.readMessage(com.google.cloud.dataproc.v1.Batch.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(batch_); + batch_ = subBuilder.buildPartial(); + } + + break; + } + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + + batchId_ = s; + break; + } + case 34: + { + java.lang.String s = input.readStringRequireUtf8(); + + requestId_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_CreateBatchRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_CreateBatchRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.CreateBatchRequest.class, + com.google.cloud.dataproc.v1.CreateBatchRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + private volatile java.lang.Object parent_; + /** + * + * + *
+   * Required. The parent resource where this batch will be created.
+   * 
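+   *
+   * <p>A sketch of the expected form (placeholder identifiers, assuming the
+   * standard location-scoped parent of a batch):
+   *
+   * <pre>{@code
+   * // "my-project" and "us-central1" are placeholders.
+   * String parent = "projects/my-project/locations/us-central1";
+   * }</pre>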
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The parent resource where this batch will be created.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BATCH_FIELD_NUMBER = 2; + private com.google.cloud.dataproc.v1.Batch batch_; + /** + * + * + *
+   * Required. The batch to create.
+   * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the batch field is set. + */ + @java.lang.Override + public boolean hasBatch() { + return batch_ != null; + } + /** + * + * + *
+   * Required. The batch to create.
+   * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The batch. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch getBatch() { + return batch_ == null ? com.google.cloud.dataproc.v1.Batch.getDefaultInstance() : batch_; + } + /** + * + * + *
+   * Required. The batch to create.
+   * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.BatchOrBuilder getBatchOrBuilder() { + return getBatch(); + } + + public static final int BATCH_ID_FIELD_NUMBER = 3; + private volatile java.lang.Object batchId_; + /** + * + * + *
+   * Optional. The ID to use for the batch, which will become the final component of
+   * the batch's resource name.
+   * This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`.
+   * 
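+   *
+   * <p>An illustrative conforming value (not taken from the source):
+   *
+   * <pre>{@code
+   * // Lowercase letters, digits, and hyphens; length between 4 and 63.
+   * String batchId = "nightly-spark-2021-10-21";
+   * }</pre>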
+ * + * string batch_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The batchId. + */ + @java.lang.Override + public java.lang.String getBatchId() { + java.lang.Object ref = batchId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + batchId_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. The ID to use for the batch, which will become the final component of
+   * the batch's resource name.
+   * This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`.
+   * 
+ * + * string batch_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for batchId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBatchIdBytes() { + java.lang.Object ref = batchId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + batchId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 4; + private volatile java.lang.Object requestId_; + /** + * + * + *
+   * Optional. A unique ID used to identify the request. If the service
+   * receives two
+   * [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s
+   * with the same request_id, the second request is ignored and the
+   * Operation that corresponds to the first Batch created and stored
+   * in the backend is returned.
+   * Recommendation: Set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The value must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
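+   *
+   * <p>A sketch of the recommended pattern ({@code java.util.UUID} is the
+   * standard-library class; nothing else here is part of the generated API):
+   *
+   * <pre>{@code
+   * // A random UUID renders as 36 characters of letters, digits, and
+   * // hyphens, which satisfies the 40-character limit above.
+   * String requestId = java.util.UUID.randomUUID().toString();
+   * }</pre>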
+ * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. A unique ID used to identify the request. If the service
+   * receives two
+   * [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s
+   * with the same request_id, the second request is ignored and the
+   * Operation that corresponds to the first Batch created and stored
+   * in the backend is returned.
+   * Recommendation: Set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The value must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (batch_ != null) { + output.writeMessage(2, getBatch()); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(batchId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, batchId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, requestId_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (batch_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getBatch()); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(batchId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, batchId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, requestId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.CreateBatchRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.CreateBatchRequest other = + (com.google.cloud.dataproc.v1.CreateBatchRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasBatch() != other.hasBatch()) return false; + if (hasBatch()) { + if (!getBatch().equals(other.getBatch())) return false; + } + if (!getBatchId().equals(other.getBatchId())) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasBatch()) { + hash = (37 * hash) + BATCH_FIELD_NUMBER; + hash = (53 * hash) + getBatch().hashCode(); + } + hash = (37 * hash) + BATCH_ID_FIELD_NUMBER; + hash = (53 * hash) + getBatchId().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + 
getRequestId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.CreateBatchRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.CreateBatchRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.CreateBatchRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.CreateBatchRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.CreateBatchRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.CreateBatchRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.CreateBatchRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.CreateBatchRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.CreateBatchRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.CreateBatchRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.CreateBatchRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.CreateBatchRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.CreateBatchRequest prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A request to create a batch workload.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.CreateBatchRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.CreateBatchRequest) + com.google.cloud.dataproc.v1.CreateBatchRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_CreateBatchRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_CreateBatchRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.CreateBatchRequest.class, + com.google.cloud.dataproc.v1.CreateBatchRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.CreateBatchRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + parent_ = ""; + + if (batchBuilder_ == null) { + batch_ = null; + } else { + batch_ = null; + batchBuilder_ = null; + } + batchId_ = ""; + + requestId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_CreateBatchRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.CreateBatchRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.CreateBatchRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.CreateBatchRequest build() { + com.google.cloud.dataproc.v1.CreateBatchRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.CreateBatchRequest buildPartial() { + com.google.cloud.dataproc.v1.CreateBatchRequest result = + new com.google.cloud.dataproc.v1.CreateBatchRequest(this); + result.parent_ = parent_; + if (batchBuilder_ == null) { + result.batch_ = batch_; + } else { + result.batch_ = batchBuilder_.build(); + } + result.batchId_ = batchId_; + result.requestId_ = requestId_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, 
value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.CreateBatchRequest) { + return mergeFrom((com.google.cloud.dataproc.v1.CreateBatchRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.CreateBatchRequest other) { + if (other == com.google.cloud.dataproc.v1.CreateBatchRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + onChanged(); + } + if (other.hasBatch()) { + mergeBatch(other.getBatch()); + } + if (!other.getBatchId().isEmpty()) { + batchId_ = other.batchId_; + onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.CreateBatchRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.CreateBatchRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object parent_ = ""; + /** + * + * + *
+     * Required. The parent resource where this batch will be created.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The parent resource where this batch will be created.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The parent resource where this batch will be created.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + parent_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The parent resource where this batch will be created.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + + parent_ = getDefaultInstance().getParent(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The parent resource where this batch will be created.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + parent_ = value; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1.Batch batch_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.Batch, + com.google.cloud.dataproc.v1.Batch.Builder, + com.google.cloud.dataproc.v1.BatchOrBuilder> + batchBuilder_; + /** + * + * + *
+     * Required. The batch to create.
+     * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the batch field is set. + */ + public boolean hasBatch() { + return batchBuilder_ != null || batch_ != null; + } + /** + * + * + *
+     * Required. The batch to create.
+     * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The batch. + */ + public com.google.cloud.dataproc.v1.Batch getBatch() { + if (batchBuilder_ == null) { + return batch_ == null ? com.google.cloud.dataproc.v1.Batch.getDefaultInstance() : batch_; + } else { + return batchBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Required. The batch to create.
+     * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBatch(com.google.cloud.dataproc.v1.Batch value) { + if (batchBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + batch_ = value; + onChanged(); + } else { + batchBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Required. The batch to create.
+     * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBatch(com.google.cloud.dataproc.v1.Batch.Builder builderForValue) { + if (batchBuilder_ == null) { + batch_ = builderForValue.build(); + onChanged(); + } else { + batchBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Required. The batch to create.
+     * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeBatch(com.google.cloud.dataproc.v1.Batch value) { + if (batchBuilder_ == null) { + if (batch_ != null) { + batch_ = + com.google.cloud.dataproc.v1.Batch.newBuilder(batch_).mergeFrom(value).buildPartial(); + } else { + batch_ = value; + } + onChanged(); + } else { + batchBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Required. The batch to create.
+     * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearBatch() { + if (batchBuilder_ == null) { + batch_ = null; + onChanged(); + } else { + batch_ = null; + batchBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Required. The batch to create.
+     * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.dataproc.v1.Batch.Builder getBatchBuilder() { + + onChanged(); + return getBatchFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Required. The batch to create.
+     * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.dataproc.v1.BatchOrBuilder getBatchOrBuilder() { + if (batchBuilder_ != null) { + return batchBuilder_.getMessageOrBuilder(); + } else { + return batch_ == null ? com.google.cloud.dataproc.v1.Batch.getDefaultInstance() : batch_; + } + } + /** + * + * + *
+     * Required. The batch to create.
+     * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.Batch, + com.google.cloud.dataproc.v1.Batch.Builder, + com.google.cloud.dataproc.v1.BatchOrBuilder> + getBatchFieldBuilder() { + if (batchBuilder_ == null) { + batchBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.Batch, + com.google.cloud.dataproc.v1.Batch.Builder, + com.google.cloud.dataproc.v1.BatchOrBuilder>( + getBatch(), getParentForChildren(), isClean()); + batch_ = null; + } + return batchBuilder_; + } + + private java.lang.Object batchId_ = ""; + /** + * + * + *
+     * Optional. The ID to use for the batch, which will become the final component of
+     * the batch's resource name.
+     * This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`.
+     * 
+ * + * string batch_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The batchId. + */ + public java.lang.String getBatchId() { + java.lang.Object ref = batchId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + batchId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. The ID to use for the batch, which will become the final component of
+     * the batch's resource name.
+     * This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`.
+     * 
+ * + * string batch_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for batchId. + */ + public com.google.protobuf.ByteString getBatchIdBytes() { + java.lang.Object ref = batchId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + batchId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. The ID to use for the batch, which will become the final component of
+     * the batch's resource name.
+     * This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`.
+     * 
+ * + * string batch_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The batchId to set. + * @return This builder for chaining. + */ + public Builder setBatchId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + batchId_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The ID to use for the batch, which will become the final component of
+     * the batch's resource name.
+     * This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`.
+     * 
+ * + * string batch_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearBatchId() { + + batchId_ = getDefaultInstance().getBatchId(); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The ID to use for the batch, which will become the final component of
+     * the batch's resource name.
+     * This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`.
+     * 
+ * + * string batch_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for batchId to set. + * @return This builder for chaining. + */ + public Builder setBatchIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + batchId_ = value; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + /** + * + * + *
+     * Optional. A unique ID used to identify the request. If the service
+     * receives two
+     * [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s
+     * with the same request_id, the second request is ignored and the
+     * Operation that corresponds to the first Batch created and stored
+     * in the backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The value must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. A unique ID used to identify the request. If the service
+     * receives two
+     * [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s
+     * with the same request_id, the second request is ignored and the
+     * Operation that corresponds to the first Batch created and stored
+     * in the backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The value must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. A unique ID used to identify the request. If the service
+     * receives two
+     * [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s
+     * with the same request_id, the second request is ignored and the
+     * Operation that corresponds to the first Batch created and stored
+     * in the backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The value must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + requestId_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A unique ID used to identify the request. If the service
+     * receives two
+     * [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s
+     * with the same request_id, the second request is ignored and the
+     * Operation that corresponds to the first Batch created and stored
+     * in the backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The value must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + + requestId_ = getDefaultInstance().getRequestId(); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A unique ID used to identify the request. If the service
+     * receives two
+     * [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s
+     * with the same request_id, the second request is ignored and the
+     * Operation that corresponds to the first Batch created and stored
+     * in the backend is returned.
+     * Recommendation: Set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The value must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + requestId_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.CreateBatchRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.CreateBatchRequest) + private static final com.google.cloud.dataproc.v1.CreateBatchRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.CreateBatchRequest(); + } + + public static com.google.cloud.dataproc.v1.CreateBatchRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateBatchRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CreateBatchRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.CreateBatchRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateBatchRequestOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateBatchRequestOrBuilder.java new file mode 100644 index 00000000..0ea10ac4 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateBatchRequestOrBuilder.java @@ -0,0 +1,164 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +public interface CreateBatchRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.CreateBatchRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The parent resource where this batch will be created.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + /** + * + * + *
+   * Required. The parent resource where this batch will be created.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The batch to create.
+   * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the batch field is set. + */ + boolean hasBatch(); + /** + * + * + *
+   * Required. The batch to create.
+   * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The batch. + */ + com.google.cloud.dataproc.v1.Batch getBatch(); + /** + * + * + *
+   * Required. The batch to create.
+   * 
+ * + * .google.cloud.dataproc.v1.Batch batch = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.dataproc.v1.BatchOrBuilder getBatchOrBuilder(); + + /** + * + * + *
+   * Optional. The ID to use for the batch, which will become the final component of
+   * the batch's resource name.
+   * This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`.
+   * 
+ * + * string batch_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The batchId. + */ + java.lang.String getBatchId(); + /** + * + * + *
+   * Optional. The ID to use for the batch, which will become the final component of
+   * the batch's resource name.
+   * This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`.
+   * 
+ * + * string batch_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for batchId. + */ + com.google.protobuf.ByteString getBatchIdBytes(); + + /** + * + * + *
+   * Optional. A unique ID used to identify the request. If the service
+   * receives two
+   * [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s
+   * with the same request_id, the second request is ignored and the
+   * Operation that corresponds to the first Batch created and stored
+   * in the backend is returned.
+   * Recommendation: Set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The value must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The requestId. + */ + java.lang.String getRequestId(); + /** + * + * + *
+   * Optional. A unique ID used to identify the request. If the service
+   * receives two
+   * [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s
+   * with the same request_id, the second request is ignored and the
+   * Operation that corresponds to the first Batch created and stored
+   * in the backend is returned.
+   * Recommendation: Set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The value must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteBatchRequest.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteBatchRequest.java new file mode 100644 index 00000000..7efd27a9 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteBatchRequest.java @@ -0,0 +1,649 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A request to delete a batch workload.
+ * 
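+ *
+ * <p>A minimal construction sketch, for illustration only (the resource name is a
+ * placeholder, assuming the usual projects/locations/batches name format):
+ *
+ * <pre>{@code
+ * // Placeholder batch resource name.
+ * DeleteBatchRequest request =
+ *     DeleteBatchRequest.newBuilder()
+ *         .setName("projects/my-project/locations/us-central1/batches/my-batch")
+ *         .build();
+ * }</pre>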
+ * + * Protobuf type {@code google.cloud.dataproc.v1.DeleteBatchRequest} + */ +public final class DeleteBatchRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.DeleteBatchRequest) + DeleteBatchRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use DeleteBatchRequest.newBuilder() to construct. + private DeleteBatchRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private DeleteBatchRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new DeleteBatchRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private DeleteBatchRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_DeleteBatchRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_DeleteBatchRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.DeleteBatchRequest.class, + com.google.cloud.dataproc.v1.DeleteBatchRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+   * Required. The name of the batch resource to delete.
+   * 
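+   *
+   * <p>A sketch of the expected form (placeholder identifiers, assuming the
+   * standard batch resource-name pattern):
+   *
+   * <pre>{@code
+   * String name = "projects/my-project/locations/us-central1/batches/my-batch";
+   * }</pre>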
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The name of the batch resource to delete.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.DeleteBatchRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.DeleteBatchRequest other = + (com.google.cloud.dataproc.v1.DeleteBatchRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.DeleteBatchRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.DeleteBatchRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.DeleteBatchRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.DeleteBatchRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.DeleteBatchRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.dataproc.v1.DeleteBatchRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.DeleteBatchRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.DeleteBatchRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.DeleteBatchRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.DeleteBatchRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.DeleteBatchRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.DeleteBatchRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.DeleteBatchRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A request to delete a batch workload.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.DeleteBatchRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.DeleteBatchRequest) + com.google.cloud.dataproc.v1.DeleteBatchRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_DeleteBatchRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_DeleteBatchRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.DeleteBatchRequest.class, + com.google.cloud.dataproc.v1.DeleteBatchRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.DeleteBatchRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_DeleteBatchRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.DeleteBatchRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.DeleteBatchRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.DeleteBatchRequest build() { + com.google.cloud.dataproc.v1.DeleteBatchRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.DeleteBatchRequest buildPartial() { + com.google.cloud.dataproc.v1.DeleteBatchRequest result = + new com.google.cloud.dataproc.v1.DeleteBatchRequest(this); + result.name_ = name_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
com.google.cloud.dataproc.v1.DeleteBatchRequest) { + return mergeFrom((com.google.cloud.dataproc.v1.DeleteBatchRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.DeleteBatchRequest other) { + if (other == com.google.cloud.dataproc.v1.DeleteBatchRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.DeleteBatchRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.DeleteBatchRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Required. The name of the batch resource to delete.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The name of the batch resource to delete.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The name of the batch resource to delete.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the batch resource to delete.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the batch resource to delete.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.DeleteBatchRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.DeleteBatchRequest) + private static final com.google.cloud.dataproc.v1.DeleteBatchRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.DeleteBatchRequest(); + } + + public static com.google.cloud.dataproc.v1.DeleteBatchRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteBatchRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DeleteBatchRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.DeleteBatchRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteBatchRequestOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteBatchRequestOrBuilder.java new file mode 100644 index 00000000..55c734ca --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteBatchRequestOrBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +public interface DeleteBatchRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.DeleteBatchRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the batch resource to delete.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Required. The name of the batch resource to delete.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetBatchRequest.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetBatchRequest.java new file mode 100644 index 00000000..9e300434 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetBatchRequest.java @@ -0,0 +1,648 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A request to get the resource representation for a batch workload.
+ * 
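+ *
+ * <p>A minimal construction sketch; the resource name shown is illustrative:
+ * <pre>{@code
+ * GetBatchRequest request =
+ *     GetBatchRequest.newBuilder()
+ *         .setName("projects/my-project/locations/us-central1/batches/my-batch")
+ *         .build();
+ * }</pre>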
+ * + * Protobuf type {@code google.cloud.dataproc.v1.GetBatchRequest} + */ +public final class GetBatchRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.GetBatchRequest) + GetBatchRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use GetBatchRequest.newBuilder() to construct. + private GetBatchRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private GetBatchRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new GetBatchRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private GetBatchRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_GetBatchRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_GetBatchRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.GetBatchRequest.class, + com.google.cloud.dataproc.v1.GetBatchRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+   * Required. The name of the batch to retrieve.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The name of the batch to retrieve.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.GetBatchRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.GetBatchRequest other = + (com.google.cloud.dataproc.v1.GetBatchRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.GetBatchRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.GetBatchRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GetBatchRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.GetBatchRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GetBatchRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.GetBatchRequest parseFrom( + byte[] 
data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GetBatchRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.GetBatchRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GetBatchRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.GetBatchRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.GetBatchRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.GetBatchRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.GetBatchRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A request to get the resource representation for a batch workload.
+   * 
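+   *
+   * <p>A serialization round-trip sketch using the standard generated-message methods
+   * ({@code request} is assumed to be an existing instance):
+   * <pre>{@code
+   * byte[] bytes = request.toByteArray();                       // serialize to the wire format
+   * GetBatchRequest parsed = GetBatchRequest.parseFrom(bytes);  // parse it back
+   * }</pre>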
+ * + * Protobuf type {@code google.cloud.dataproc.v1.GetBatchRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.GetBatchRequest) + com.google.cloud.dataproc.v1.GetBatchRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_GetBatchRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_GetBatchRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.GetBatchRequest.class, + com.google.cloud.dataproc.v1.GetBatchRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.GetBatchRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_GetBatchRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.GetBatchRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.GetBatchRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.GetBatchRequest build() { + com.google.cloud.dataproc.v1.GetBatchRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.GetBatchRequest buildPartial() { + com.google.cloud.dataproc.v1.GetBatchRequest result = + new com.google.cloud.dataproc.v1.GetBatchRequest(this); + result.name_ = name_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.GetBatchRequest) { + return 
mergeFrom((com.google.cloud.dataproc.v1.GetBatchRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.GetBatchRequest other) { + if (other == com.google.cloud.dataproc.v1.GetBatchRequest.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.GetBatchRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.GetBatchRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Required. The name of the batch to retrieve.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The name of the batch to retrieve.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The name of the batch to retrieve.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the batch to retrieve.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the batch to retrieve.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.GetBatchRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GetBatchRequest) + private static final com.google.cloud.dataproc.v1.GetBatchRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.GetBatchRequest(); + } + + public static com.google.cloud.dataproc.v1.GetBatchRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetBatchRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetBatchRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.GetBatchRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetBatchRequestOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetBatchRequestOrBuilder.java new file mode 100644 index 00000000..e8b97dd4 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetBatchRequestOrBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +public interface GetBatchRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.GetBatchRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the batch to retrieve.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Required. The name of the batch to retrieve.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesRequest.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesRequest.java new file mode 100644 index 00000000..09cd5aa3 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesRequest.java @@ -0,0 +1,933 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A request to list batch workloads in a project.
+ * 
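+ *
+ * <p>A minimal first-page sketch; the parent value is illustrative, and the page size
+ * is optional (the server defaults to 20 and caps it at 1000):
+ * <pre>{@code
+ * ListBatchesRequest request =
+ *     ListBatchesRequest.newBuilder()
+ *         .setParent("projects/my-project/locations/us-central1")
+ *         .setPageSize(50)
+ *         .build();
+ * }</pre>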
+ * + * Protobuf type {@code google.cloud.dataproc.v1.ListBatchesRequest} + */ +public final class ListBatchesRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.ListBatchesRequest) + ListBatchesRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use ListBatchesRequest.newBuilder() to construct. + private ListBatchesRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListBatchesRequest() { + parent_ = ""; + pageToken_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ListBatchesRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ListBatchesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + parent_ = s; + break; + } + case 16: + { + pageSize_ = input.readInt32(); + break; + } + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + + pageToken_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_ListBatchesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_ListBatchesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.ListBatchesRequest.class, + com.google.cloud.dataproc.v1.ListBatchesRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + private volatile java.lang.Object parent_; + /** + * + * + *
+   * Required. The parent, which owns this collection of batches.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The parent, which owns this collection of batches.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_; + /** + * + * + *
+   * Optional. The maximum number of batches to return in each response.
+   * The service may return fewer than this value.
+   * The default page size is 20; the maximum page size is 1000.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + private volatile java.lang.Object pageToken_; + /** + * + * + *
+   * Optional. A page token received from a previous `ListBatches` call.
+   * Provide this token to retrieve the subsequent page.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. A page token received from a previous `ListBatches` call.
+   * Provide this token to retrieve the subsequent page.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, pageToken_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, pageToken_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.ListBatchesRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.ListBatchesRequest other = + (com.google.cloud.dataproc.v1.ListBatchesRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.ListBatchesRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.ListBatchesRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ListBatchesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.ListBatchesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ListBatchesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.ListBatchesRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ListBatchesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.ListBatchesRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ListBatchesRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.ListBatchesRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ListBatchesRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.ListBatchesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.ListBatchesRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A request to list batch workloads in a project.
+   * 
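+   *
+   * <p>A paging sketch; {@code previous} stands in for a ListBatchesResponse from an
+   * earlier call and is assumed here:
+   * <pre>{@code
+   * ListBatchesRequest nextRequest =
+   *     request.toBuilder()
+   *         .setPageToken(previous.getNextPageToken())
+   *         .build();
+   * }</pre>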
+ * + * Protobuf type {@code google.cloud.dataproc.v1.ListBatchesRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.ListBatchesRequest) + com.google.cloud.dataproc.v1.ListBatchesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_ListBatchesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_ListBatchesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.ListBatchesRequest.class, + com.google.cloud.dataproc.v1.ListBatchesRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.ListBatchesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + parent_ = ""; + + pageSize_ = 0; + + pageToken_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_ListBatchesRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ListBatchesRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.ListBatchesRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ListBatchesRequest build() { + com.google.cloud.dataproc.v1.ListBatchesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ListBatchesRequest buildPartial() { + com.google.cloud.dataproc.v1.ListBatchesRequest result = + new com.google.cloud.dataproc.v1.ListBatchesRequest(this); + result.parent_ = parent_; + result.pageSize_ = pageSize_; + result.pageToken_ = pageToken_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + 
@java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.ListBatchesRequest) { + return mergeFrom((com.google.cloud.dataproc.v1.ListBatchesRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.ListBatchesRequest other) { + if (other == com.google.cloud.dataproc.v1.ListBatchesRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.ListBatchesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.ListBatchesRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object parent_ = ""; + /** + * + * + *
+     * Required. The parent, which owns this collection of batches.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The parent, which owns this collection of batches.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The parent, which owns this collection of batches.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + parent_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The parent, which owns this collection of batches.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + + parent_ = getDefaultInstance().getParent(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The parent, which owns this collection of batches.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + parent_ = value; + onChanged(); + return this; + } + + private int pageSize_; + /** + * + * + *
+     * Optional. The maximum number of batches to return in each response.
+     * The service may return fewer than this value.
+     * The default page size is 20; the maximum page size is 1000.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + /** + * + * + *
+     * Optional. The maximum number of batches to return in each response.
+     * The service may return fewer than this value.
+     * The default page size is 20; the maximum page size is 1000.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The maximum number of batches to return in each response.
+     * The service may return fewer than this value.
+     * The default page size is 20; the maximum page size is 1000.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + /** + * + * + *
+     * Optional. A page token received from a previous `ListBatches` call.
+     * Provide this token to retrieve the subsequent page.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. A page token received from a previous `ListBatches` call.
+     * Provide this token to retrieve the subsequent page.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. A page token received from a previous `ListBatches` call.
+     * Provide this token to retrieve the subsequent page.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + pageToken_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A page token received from a previous `ListBatches` call.
+     * Provide this token to retrieve the subsequent page.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + + pageToken_ = getDefaultInstance().getPageToken(); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A page token received from a previous `ListBatches` call.
+     * Provide this token to retrieve the subsequent page.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + pageToken_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.ListBatchesRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ListBatchesRequest) + private static final com.google.cloud.dataproc.v1.ListBatchesRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.ListBatchesRequest(); + } + + public static com.google.cloud.dataproc.v1.ListBatchesRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListBatchesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListBatchesRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ListBatchesRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesRequestOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesRequestOrBuilder.java new file mode 100644 index 00000000..3f4c4c33 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesRequestOrBuilder.java @@ -0,0 +1,96 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +public interface ListBatchesRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.ListBatchesRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The parent, which owns this collection of batches.
+   * 
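+   *
+   * A short sketch of setting an explicit parent; the
+   * `projects/{project}/locations/{location}` shape is the conventional one
+   * for batch parents, and the values shown are placeholders:
+   * <pre>{@code
+   * ListBatchesRequest request =
+   *     ListBatchesRequest.newBuilder()
+   *         .setParent("projects/my-project/locations/us-central1")
+   *         .build();
+   * }</pre>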
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + /** + * + * + *
+   * Required. The parent, which owns this collection of batches.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. The maximum number of batches to return in each response.
+   * The service may return fewer than this value.
+   * The default page size is 20; the maximum page size is 1000.
+   * 
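+   *
+   * A short sketch; leaving this field unset yields the documented default of
+   * 20 results per page, and the service returns at most the requested count:
+   * <pre>{@code
+   * ListBatchesRequest request =
+   *     ListBatchesRequest.newBuilder()
+   *         .setParent("projects/my-project/locations/us-central1")
+   *         .setPageSize(100) // no more than 100 batches per response
+   *         .build();
+   * }</pre>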
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
+   * Optional. A page token received from a previous `ListBatches` call.
+   * Provide this token to retrieve the subsequent page.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + /** + * + * + *
+   * Optional. A page token received from a previous `ListBatches` call.
+   * Provide this token to retrieve the subsequent page.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesResponse.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesResponse.java new file mode 100644 index 00000000..4106275b --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesResponse.java @@ -0,0 +1,1133 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A list of batch workloads.
+ * 
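+ *
+ * A sketch of consuming one page of results (assumes {@code response} was
+ * returned by a ListBatches call):
+ * <pre>{@code
+ * for (Batch batch : response.getBatchesList()) {
+ *   // ...inspect each batch workload...
+ * }
+ * String token = response.getNextPageToken();
+ * if (!token.isEmpty()) {
+ *   // Send the token back as page_token to fetch the next page.
+ * }
+ * }</pre>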
+ * + * Protobuf type {@code google.cloud.dataproc.v1.ListBatchesResponse} + */ +public final class ListBatchesResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.ListBatchesResponse) + ListBatchesResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use ListBatchesResponse.newBuilder() to construct. + private ListBatchesResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListBatchesResponse() { + batches_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ListBatchesResponse(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ListBatchesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + batches_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + batches_.add( + input.readMessage( + com.google.cloud.dataproc.v1.Batch.parser(), extensionRegistry)); + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + + nextPageToken_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + batches_ = java.util.Collections.unmodifiableList(batches_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_ListBatchesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_ListBatchesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.ListBatchesResponse.class, + com.google.cloud.dataproc.v1.ListBatchesResponse.Builder.class); + } + + public static final int BATCHES_FIELD_NUMBER = 1; + private java.util.List batches_; + /** + * + * + *
+   * The batches from the specified collection.
+   * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + @java.lang.Override + public java.util.List getBatchesList() { + return batches_; + } + /** + * + * + *
+   * The batches from the specified collection.
+   * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + @java.lang.Override + public java.util.List + getBatchesOrBuilderList() { + return batches_; + } + /** + * + * + *
+   * The batches from the specified collection.
+   * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + @java.lang.Override + public int getBatchesCount() { + return batches_.size(); + } + /** + * + * + *
+   * The batches from the specified collection.
+   * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.Batch getBatches(int index) { + return batches_.get(index); + } + /** + * + * + *
+   * The batches from the specified collection.
+   * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.BatchOrBuilder getBatchesOrBuilder(int index) { + return batches_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + private volatile java.lang.Object nextPageToken_; + /** + * + * + *
+   * A token, which can be sent as `page_token` to retrieve the next page.
+   * If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + /** + * + * + *
+   * A token, which can be sent as `page_token` to retrieve the next page.
+   * If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < batches_.size(); i++) { + output.writeMessage(1, batches_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, nextPageToken_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < batches_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, batches_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, nextPageToken_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.ListBatchesResponse)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.ListBatchesResponse other = + (com.google.cloud.dataproc.v1.ListBatchesResponse) obj; + + if (!getBatchesList().equals(other.getBatchesList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getBatchesCount() > 0) { + hash = (37 * hash) + BATCHES_FIELD_NUMBER; + hash = (53 * hash) + getBatchesList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.ListBatchesResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.ListBatchesResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ListBatchesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.ListBatchesResponse 
parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ListBatchesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.ListBatchesResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ListBatchesResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.ListBatchesResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ListBatchesResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.ListBatchesResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ListBatchesResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.ListBatchesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.ListBatchesResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A list of batch workloads.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.ListBatchesResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.ListBatchesResponse) + com.google.cloud.dataproc.v1.ListBatchesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_ListBatchesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_ListBatchesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.ListBatchesResponse.class, + com.google.cloud.dataproc.v1.ListBatchesResponse.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.ListBatchesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getBatchesFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (batchesBuilder_ == null) { + batches_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + batchesBuilder_.clear(); + } + nextPageToken_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_ListBatchesResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ListBatchesResponse getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.ListBatchesResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ListBatchesResponse build() { + com.google.cloud.dataproc.v1.ListBatchesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ListBatchesResponse buildPartial() { + com.google.cloud.dataproc.v1.ListBatchesResponse result = + new com.google.cloud.dataproc.v1.ListBatchesResponse(this); + int from_bitField0_ = bitField0_; + if (batchesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + batches_ = java.util.Collections.unmodifiableList(batches_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.batches_ = batches_; + } else { + result.batches_ = batchesBuilder_.build(); + } + result.nextPageToken_ = nextPageToken_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return 
super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.ListBatchesResponse) { + return mergeFrom((com.google.cloud.dataproc.v1.ListBatchesResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.ListBatchesResponse other) { + if (other == com.google.cloud.dataproc.v1.ListBatchesResponse.getDefaultInstance()) + return this; + if (batchesBuilder_ == null) { + if (!other.batches_.isEmpty()) { + if (batches_.isEmpty()) { + batches_ = other.batches_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureBatchesIsMutable(); + batches_.addAll(other.batches_); + } + onChanged(); + } + } else { + if (!other.batches_.isEmpty()) { + if (batchesBuilder_.isEmpty()) { + batchesBuilder_.dispose(); + batchesBuilder_ = null; + batches_ = other.batches_; + bitField0_ = (bitField0_ & ~0x00000001); + batchesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getBatchesFieldBuilder() + : null; + } else { + batchesBuilder_.addAllMessages(other.batches_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.ListBatchesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.ListBatchesResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.util.List batches_ = + java.util.Collections.emptyList(); + + private void ensureBatchesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + batches_ = new java.util.ArrayList(batches_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1.Batch, + com.google.cloud.dataproc.v1.Batch.Builder, + com.google.cloud.dataproc.v1.BatchOrBuilder> + batchesBuilder_; + + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public java.util.List getBatchesList() { + if (batchesBuilder_ == null) { + return java.util.Collections.unmodifiableList(batches_); + } else { + return batchesBuilder_.getMessageList(); + } + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public int getBatchesCount() { + if (batchesBuilder_ == null) { + return batches_.size(); + } else { + return batchesBuilder_.getCount(); + } + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public com.google.cloud.dataproc.v1.Batch getBatches(int index) { + if (batchesBuilder_ == null) { + return batches_.get(index); + } else { + return batchesBuilder_.getMessage(index); + } + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public Builder setBatches(int index, com.google.cloud.dataproc.v1.Batch value) { + if (batchesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBatchesIsMutable(); + batches_.set(index, value); + onChanged(); + } else { + batchesBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public Builder setBatches( + int index, com.google.cloud.dataproc.v1.Batch.Builder builderForValue) { + if (batchesBuilder_ == null) { + ensureBatchesIsMutable(); + batches_.set(index, builderForValue.build()); + onChanged(); + } else { + batchesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public Builder addBatches(com.google.cloud.dataproc.v1.Batch value) { + if (batchesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBatchesIsMutable(); + batches_.add(value); + onChanged(); + } else { + batchesBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public Builder addBatches(int index, com.google.cloud.dataproc.v1.Batch value) { + if (batchesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBatchesIsMutable(); + batches_.add(index, value); + onChanged(); + } else { + batchesBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public Builder addBatches(com.google.cloud.dataproc.v1.Batch.Builder builderForValue) { + if (batchesBuilder_ == null) { + ensureBatchesIsMutable(); + batches_.add(builderForValue.build()); + onChanged(); + } else { + batchesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public Builder addBatches( + int index, com.google.cloud.dataproc.v1.Batch.Builder builderForValue) { + if (batchesBuilder_ == null) { + ensureBatchesIsMutable(); + batches_.add(index, builderForValue.build()); + onChanged(); + } else { + batchesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public Builder addAllBatches( + java.lang.Iterable values) { + if (batchesBuilder_ == null) { + ensureBatchesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, batches_); + onChanged(); + } else { + batchesBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public Builder clearBatches() { + if (batchesBuilder_ == null) { + batches_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + batchesBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public Builder removeBatches(int index) { + if (batchesBuilder_ == null) { + ensureBatchesIsMutable(); + batches_.remove(index); + onChanged(); + } else { + batchesBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public com.google.cloud.dataproc.v1.Batch.Builder getBatchesBuilder(int index) { + return getBatchesFieldBuilder().getBuilder(index); + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public com.google.cloud.dataproc.v1.BatchOrBuilder getBatchesOrBuilder(int index) { + if (batchesBuilder_ == null) { + return batches_.get(index); + } else { + return batchesBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public java.util.List + getBatchesOrBuilderList() { + if (batchesBuilder_ != null) { + return batchesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(batches_); + } + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public com.google.cloud.dataproc.v1.Batch.Builder addBatchesBuilder() { + return getBatchesFieldBuilder() + .addBuilder(com.google.cloud.dataproc.v1.Batch.getDefaultInstance()); + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public com.google.cloud.dataproc.v1.Batch.Builder addBatchesBuilder(int index) { + return getBatchesFieldBuilder() + .addBuilder(index, com.google.cloud.dataproc.v1.Batch.getDefaultInstance()); + } + /** + * + * + *
+     * The batches from the specified collection.
+     * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + public java.util.List getBatchesBuilderList() { + return getBatchesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1.Batch, + com.google.cloud.dataproc.v1.Batch.Builder, + com.google.cloud.dataproc.v1.BatchOrBuilder> + getBatchesFieldBuilder() { + if (batchesBuilder_ == null) { + batchesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1.Batch, + com.google.cloud.dataproc.v1.Batch.Builder, + com.google.cloud.dataproc.v1.BatchOrBuilder>( + batches_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + batches_ = null; + } + return batchesBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + /** + * + * + *
+     * A token, which can be sent as `page_token` to retrieve the next page.
+     * If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * A token, which can be sent as `page_token` to retrieve the next page.
+     * If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * A token, which can be sent as `page_token` to retrieve the next page.
+     * If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + nextPageToken_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * A token, which can be sent as `page_token` to retrieve the next page.
+     * If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + + nextPageToken_ = getDefaultInstance().getNextPageToken(); + onChanged(); + return this; + } + /** + * + * + *
+     * A token, which can be sent as `page_token` to retrieve the next page.
+     * If this field is omitted, there are no subsequent pages.
+     * 
+ * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + nextPageToken_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.ListBatchesResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ListBatchesResponse) + private static final com.google.cloud.dataproc.v1.ListBatchesResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.ListBatchesResponse(); + } + + public static com.google.cloud.dataproc.v1.ListBatchesResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListBatchesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListBatchesResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ListBatchesResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesResponseOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesResponseOrBuilder.java new file mode 100644 index 00000000..bc8cc883 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListBatchesResponseOrBuilder.java @@ -0,0 +1,103 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +public interface ListBatchesResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.ListBatchesResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The batches from the specified collection.
+   * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + java.util.List getBatchesList(); + /** + * + * + *
+   * The batches from the specified collection.
+   * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + com.google.cloud.dataproc.v1.Batch getBatches(int index); + /** + * + * + *
+   * The batches from the specified collection.
+   * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + int getBatchesCount(); + /** + * + * + *
+   * The batches from the specified collection.
+   * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + java.util.List getBatchesOrBuilderList(); + /** + * + * + *
+   * The batches from the specified collection.
+   * 
+ * + * repeated .google.cloud.dataproc.v1.Batch batches = 1; + */ + com.google.cloud.dataproc.v1.BatchOrBuilder getBatchesOrBuilder(int index); + + /** + * + * + *
+   * A token, which can be sent as `page_token` to retrieve the next page.
+   * If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + /** + * + * + *
+   * A token, which can be sent as `page_token` to retrieve the next page.
+   * If this field is omitted, there are no subsequent pages.
+   * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkBatch.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkBatch.java new file mode 100644 index 00000000..01199b11 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkBatch.java @@ -0,0 +1,2122 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A configuration for running an
+ * [Apache PySpark](https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html)
+ * batch workload.
+ * 
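+ *
+ * A sketch of assembling a configuration with the builder (all gs:// URIs
+ * are placeholders):
+ * <pre>{@code
+ * PySparkBatch pySparkBatch =
+ *     PySparkBatch.newBuilder()
+ *         .setMainPythonFileUri("gs://my-bucket/job/main.py")
+ *         .addArgs("--input=gs://my-bucket/data")
+ *         .addJarFileUris("gs://my-bucket/libs/connector.jar")
+ *         .build();
+ * }</pre>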
+ * + * Protobuf type {@code google.cloud.dataproc.v1.PySparkBatch} + */ +public final class PySparkBatch extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.PySparkBatch) + PySparkBatchOrBuilder { + private static final long serialVersionUID = 0L; + // Use PySparkBatch.newBuilder() to construct. + private PySparkBatch(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private PySparkBatch() { + mainPythonFileUri_ = ""; + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + pythonFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new PySparkBatch(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private PySparkBatch( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + mainPythonFileUri_ = s; + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + args_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + args_.add(s); + break; + } + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000002) != 0)) { + pythonFileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + pythonFileUris_.add(s); + break; + } + case 34: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000004) != 0)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000004; + } + jarFileUris_.add(s); + break; + } + case 42: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000008) != 0)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000008; + } + fileUris_.add(s); + break; + } + case 50: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000010) != 0)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000010; + } + archiveUris_.add(s); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + args_ = 
args_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000002) != 0)) { + pythonFileUris_ = pythonFileUris_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000004) != 0)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000008) != 0)) { + fileUris_ = fileUris_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000010) != 0)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_PySparkBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_PySparkBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.PySparkBatch.class, + com.google.cloud.dataproc.v1.PySparkBatch.Builder.class); + } + + public static final int MAIN_PYTHON_FILE_URI_FIELD_NUMBER = 1; + private volatile java.lang.Object mainPythonFileUri_; + /** + * + * + *
+   * Required. The HCFS URI of the main Python file to use as the Spark driver. Must
+   * be a `.py` file.
+   * 
+ * + * string main_python_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The mainPythonFileUri. + */ + @java.lang.Override + public java.lang.String getMainPythonFileUri() { + java.lang.Object ref = mainPythonFileUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + mainPythonFileUri_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The HCFS URI of the main Python file to use as the Spark driver. Must
+   * be a `.py` file.
+   * 
+ * + * string main_python_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for mainPythonFileUri. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMainPythonFileUriBytes() { + java.lang.Object ref = mainPythonFileUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + mainPythonFileUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ARGS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList args_; + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur and cause an incorrect batch submission.
+   * 
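+   *
+   * A sketch separating application arguments, which belong here, from Spark
+   * properties, which do not (the property key is only an example of what to
+   * avoid):
+   * <pre>{@code
+   * PySparkBatch.newBuilder()
+   *     .setMainPythonFileUri("gs://my-bucket/job/main.py")
+   *     .addArgs("--input=gs://my-bucket/data") // plain application argument
+   *     // Do NOT pass "--conf spark.executor.cores=4" through args; set
+   *     // Spark properties through the batch runtime configuration instead.
+   *     .build();
+   * }</pre>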
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the args. + */ + public com.google.protobuf.ProtocolStringList getArgsList() { + return args_; + } + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur and cause an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of args. + */ + public int getArgsCount() { + return args_.size(); + } + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur and cause an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The args at the given index. + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur and cause an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the args at the given index. + */ + public com.google.protobuf.ByteString getArgsBytes(int index) { + return args_.getByteString(index); + } + + public static final int PYTHON_FILE_URIS_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList pythonFileUris_; + /** + * + * + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+   * 
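+   *
+   * A sketch of attaching importable Python dependencies (placeholder URIs):
+   * <pre>{@code
+   * PySparkBatch.newBuilder()
+   *     .setMainPythonFileUri("gs://my-bucket/job/main.py")
+   *     .addPythonFileUris("gs://my-bucket/deps/helpers.py")
+   *     .addPythonFileUris("gs://my-bucket/deps/libs.zip")
+   *     .build();
+   * }</pre>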
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the pythonFileUris. + */ + public com.google.protobuf.ProtocolStringList getPythonFileUrisList() { + return pythonFileUris_; + } + /** + * + * + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+   * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of pythonFileUris. + */ + public int getPythonFileUrisCount() { + return pythonFileUris_.size(); + } + /** + * + * + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+   * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The pythonFileUris at the given index. + */ + public java.lang.String getPythonFileUris(int index) { + return pythonFileUris_.get(index); + } + /** + * + * + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+   * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the pythonFileUris at the given index. + */ + public com.google.protobuf.ByteString getPythonFileUrisBytes(int index) { + return pythonFileUris_.getByteString(index); + } + + public static final int JAR_FILE_URIS_FIELD_NUMBER = 4; + private com.google.protobuf.LazyStringList jarFileUris_; + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the jarFileUris. + */ + public com.google.protobuf.ProtocolStringList getJarFileUrisList() { + return jarFileUris_; + } + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of jarFileUris. + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The jarFileUris at the given index. + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the jarFileUris at the given index. + */ + public com.google.protobuf.ByteString getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + + public static final int FILE_URIS_FIELD_NUMBER = 5; + private com.google.protobuf.LazyStringList fileUris_; + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
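+   *
+   * A sketch of staging a plain data file; once placed in the working
+   * directory it can typically be opened by its bare file name (placeholder
+   * URIs):
+   * <pre>{@code
+   * PySparkBatch.newBuilder()
+   *     .setMainPythonFileUri("gs://my-bucket/job/main.py")
+   *     .addFileUris("gs://my-bucket/config/settings.json")
+   *     .build();
+   * }</pre>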
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the fileUris. + */ + public com.google.protobuf.ProtocolStringList getFileUrisList() { + return fileUris_; + } + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of fileUris. + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The fileUris at the given index. + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the fileUris at the given index. + */ + public com.google.protobuf.ByteString getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + + public static final int ARCHIVE_URIS_FIELD_NUMBER = 6; + private com.google.protobuf.LazyStringList archiveUris_; + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
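+   *
+   * A sketch of shipping an archive for each executor to unpack into its
+   * working directory (placeholder URIs):
+   * <pre>{@code
+   * PySparkBatch.newBuilder()
+   *     .setMainPythonFileUri("gs://my-bucket/job/main.py")
+   *     .addArchiveUris("gs://my-bucket/deps/resources.tar.gz")
+   *     .build();
+   * }</pre>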
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the archiveUris. + */ + public com.google.protobuf.ProtocolStringList getArchiveUrisList() { + return archiveUris_; + } + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of archiveUris. + */ + public int getArchiveUrisCount() { + return archiveUris_.size(); + } + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The archiveUris at the given index. + */ + public java.lang.String getArchiveUris(int index) { + return archiveUris_.get(index); + } + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the archiveUris at the given index. + */ + public com.google.protobuf.ByteString getArchiveUrisBytes(int index) { + return archiveUris_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(mainPythonFileUri_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, mainPythonFileUri_); + } + for (int i = 0; i < args_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, args_.getRaw(i)); + } + for (int i = 0; i < pythonFileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, pythonFileUris_.getRaw(i)); + } + for (int i = 0; i < jarFileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, jarFileUris_.getRaw(i)); + } + for (int i = 0; i < fileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, fileUris_.getRaw(i)); + } + for (int i = 0; i < archiveUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, archiveUris_.getRaw(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(mainPythonFileUri_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, mainPythonFileUri_); + } + { + int dataSize = 0; + for (int i = 0; i < args_.size(); i++) { + dataSize += computeStringSizeNoTag(args_.getRaw(i)); + } + size += dataSize; + size += 1 * getArgsList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < pythonFileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(pythonFileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getPythonFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < jarFileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(jarFileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getJarFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < fileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(fileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < archiveUris_.size(); i++) { + dataSize += computeStringSizeNoTag(archiveUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getArchiveUrisList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.PySparkBatch)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.PySparkBatch other = + (com.google.cloud.dataproc.v1.PySparkBatch) obj; + + if (!getMainPythonFileUri().equals(other.getMainPythonFileUri())) return false; + if (!getArgsList().equals(other.getArgsList())) return false; + if 
(!getPythonFileUrisList().equals(other.getPythonFileUrisList())) return false; + if (!getJarFileUrisList().equals(other.getJarFileUrisList())) return false; + if (!getFileUrisList().equals(other.getFileUrisList())) return false; + if (!getArchiveUrisList().equals(other.getArchiveUrisList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + MAIN_PYTHON_FILE_URI_FIELD_NUMBER; + hash = (53 * hash) + getMainPythonFileUri().hashCode(); + if (getArgsCount() > 0) { + hash = (37 * hash) + ARGS_FIELD_NUMBER; + hash = (53 * hash) + getArgsList().hashCode(); + } + if (getPythonFileUrisCount() > 0) { + hash = (37 * hash) + PYTHON_FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getPythonFileUrisList().hashCode(); + } + if (getJarFileUrisCount() > 0) { + hash = (37 * hash) + JAR_FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getJarFileUrisList().hashCode(); + } + if (getFileUrisCount() > 0) { + hash = (37 * hash) + FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getFileUrisList().hashCode(); + } + if (getArchiveUrisCount() > 0) { + hash = (37 * hash) + ARCHIVE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getArchiveUrisList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.PySparkBatch parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.PySparkBatch parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.PySparkBatch parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.PySparkBatch parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.PySparkBatch parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.PySparkBatch parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.PySparkBatch parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.PySparkBatch parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.PySparkBatch parseDelimitedFrom( + java.io.InputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.PySparkBatch parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.PySparkBatch parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.PySparkBatch parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.PySparkBatch prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A configuration for running an
+   * [Apache
+   * PySpark](https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html)
+   * batch workload.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.PySparkBatch} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.PySparkBatch) + com.google.cloud.dataproc.v1.PySparkBatchOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_PySparkBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_PySparkBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.PySparkBatch.class, + com.google.cloud.dataproc.v1.PySparkBatch.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.PySparkBatch.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + mainPythonFileUri_ = ""; + + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + pythonFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_PySparkBatch_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.PySparkBatch getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.PySparkBatch.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.PySparkBatch build() { + com.google.cloud.dataproc.v1.PySparkBatch result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.PySparkBatch buildPartial() { + com.google.cloud.dataproc.v1.PySparkBatch result = + new com.google.cloud.dataproc.v1.PySparkBatch(this); + int from_bitField0_ = bitField0_; + result.mainPythonFileUri_ = mainPythonFileUri_; + if (((bitField0_ & 0x00000001) != 0)) { + args_ = args_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.args_ = args_; + if (((bitField0_ & 0x00000002) != 0)) { + pythonFileUris_ = pythonFileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.pythonFileUris_ = pythonFileUris_; + if (((bitField0_ & 0x00000004) != 0)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.jarFileUris_ = jarFileUris_; + if (((bitField0_ & 0x00000008) != 0)) { + fileUris_ = 
fileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.fileUris_ = fileUris_; + if (((bitField0_ & 0x00000010) != 0)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.archiveUris_ = archiveUris_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.PySparkBatch) { + return mergeFrom((com.google.cloud.dataproc.v1.PySparkBatch) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.PySparkBatch other) { + if (other == com.google.cloud.dataproc.v1.PySparkBatch.getDefaultInstance()) return this; + if (!other.getMainPythonFileUri().isEmpty()) { + mainPythonFileUri_ = other.mainPythonFileUri_; + onChanged(); + } + if (!other.args_.isEmpty()) { + if (args_.isEmpty()) { + args_ = other.args_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureArgsIsMutable(); + args_.addAll(other.args_); + } + onChanged(); + } + if (!other.pythonFileUris_.isEmpty()) { + if (pythonFileUris_.isEmpty()) { + pythonFileUris_ = other.pythonFileUris_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensurePythonFileUrisIsMutable(); + pythonFileUris_.addAll(other.pythonFileUris_); + } + onChanged(); + } + if (!other.jarFileUris_.isEmpty()) { + if (jarFileUris_.isEmpty()) { + jarFileUris_ = other.jarFileUris_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureJarFileUrisIsMutable(); + jarFileUris_.addAll(other.jarFileUris_); + } + onChanged(); + } + if (!other.fileUris_.isEmpty()) { + if (fileUris_.isEmpty()) { + fileUris_ = other.fileUris_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureFileUrisIsMutable(); + fileUris_.addAll(other.fileUris_); + } + onChanged(); + } + if (!other.archiveUris_.isEmpty()) { + if (archiveUris_.isEmpty()) { + archiveUris_ = other.archiveUris_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureArchiveUrisIsMutable(); + archiveUris_.addAll(other.archiveUris_); + } + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.PySparkBatch parsedMessage = null; + try { + parsedMessage 
= PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.PySparkBatch) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.lang.Object mainPythonFileUri_ = ""; + /** + * + * + *
+     * Required. The HCFS URI of the main Python file to use as the Spark driver. Must
+     * be a .py file.
+     * 
+ * + * string main_python_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The mainPythonFileUri. + */ + public java.lang.String getMainPythonFileUri() { + java.lang.Object ref = mainPythonFileUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + mainPythonFileUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The HCFS URI of the main Python file to use as the Spark driver. Must
+     * be a .py file.
+     * 
+ * + * string main_python_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for mainPythonFileUri. + */ + public com.google.protobuf.ByteString getMainPythonFileUriBytes() { + java.lang.Object ref = mainPythonFileUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + mainPythonFileUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The HCFS URI of the main Python file to use as the Spark driver. Must
+     * be a .py file.
+     * 
+ * + * string main_python_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The mainPythonFileUri to set. + * @return This builder for chaining. + */ + public Builder setMainPythonFileUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + mainPythonFileUri_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The HCFS URI of the main Python file to use as the Spark driver. Must
+     * be a .py file.
+     * 
+ * + * string main_python_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearMainPythonFileUri() { + + mainPythonFileUri_ = getDefaultInstance().getMainPythonFileUri(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The HCFS URI of the main Python file to use as the Spark driver. Must
+     * be a .py file.
+     * 
+ * + * string main_python_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for mainPythonFileUri to set. + * @return This builder for chaining. + */ + public Builder setMainPythonFileUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + mainPythonFileUri_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList args_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureArgsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + args_ = new com.google.protobuf.LazyStringArrayList(args_); + bitField0_ |= 0x00000001; + } + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the args. + */ + public com.google.protobuf.ProtocolStringList getArgsList() { + return args_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of args. + */ + public int getArgsCount() { + return args_.size(); + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The args at the given index. + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the args at the given index. + */ + public com.google.protobuf.ByteString getArgsBytes(int index) { + return args_.getByteString(index); + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The args to set. + * @return This builder for chaining. + */ + public Builder setArgs(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The args to add. + * @return This builder for chaining. + */ + public Builder addArgs(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The args to add. + * @return This builder for chaining. + */ + public Builder addAllArgs(java.lang.Iterable values) { + ensureArgsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, args_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearArgs() { + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the args to add. + * @return This builder for chaining. + */ + public Builder addArgsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList pythonFileUris_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensurePythonFileUrisIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + pythonFileUris_ = new com.google.protobuf.LazyStringArrayList(pythonFileUris_); + bitField0_ |= 0x00000002; + } + } + /** + * + * + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+     * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the pythonFileUris. + */ + public com.google.protobuf.ProtocolStringList getPythonFileUrisList() { + return pythonFileUris_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+     * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of pythonFileUris. + */ + public int getPythonFileUrisCount() { + return pythonFileUris_.size(); + } + /** + * + * + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+     * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The pythonFileUris at the given index. + */ + public java.lang.String getPythonFileUris(int index) { + return pythonFileUris_.get(index); + } + /** + * + * + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+     * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the pythonFileUris at the given index. + */ + public com.google.protobuf.ByteString getPythonFileUrisBytes(int index) { + return pythonFileUris_.getByteString(index); + } + /** + * + * + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+     * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The pythonFileUris to set. + * @return This builder for chaining. + */ + public Builder setPythonFileUris(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePythonFileUrisIsMutable(); + pythonFileUris_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+     * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pythonFileUris to add. + * @return This builder for chaining. + */ + public Builder addPythonFileUris(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePythonFileUrisIsMutable(); + pythonFileUris_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+     * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The pythonFileUris to add. + * @return This builder for chaining. + */ + public Builder addAllPythonFileUris(java.lang.Iterable values) { + ensurePythonFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, pythonFileUris_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+     * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPythonFileUris() { + pythonFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+     * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the pythonFileUris to add. + * @return This builder for chaining. + */ + public Builder addPythonFileUrisBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensurePythonFileUrisIsMutable(); + pythonFileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList jarFileUris_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureJarFileUrisIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(jarFileUris_); + bitField0_ |= 0x00000004; + } + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the jarFileUris. + */ + public com.google.protobuf.ProtocolStringList getJarFileUrisList() { + return jarFileUris_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of jarFileUris. + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The jarFileUris at the given index. + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the jarFileUris at the given index. + */ + public com.google.protobuf.ByteString getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The jarFileUris to set. + * @return This builder for chaining. + */ + public Builder setJarFileUris(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The jarFileUris to add. + * @return This builder for chaining. + */ + public Builder addJarFileUris(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The jarFileUris to add. + * @return This builder for chaining. + */ + public Builder addAllJarFileUris(java.lang.Iterable values) { + ensureJarFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, jarFileUris_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearJarFileUris() { + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the jarFileUris to add. + * @return This builder for chaining. + */ + public Builder addJarFileUrisBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList fileUris_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureFileUrisIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(fileUris_); + bitField0_ |= 0x00000008; + } + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the fileUris. + */ + public com.google.protobuf.ProtocolStringList getFileUrisList() { + return fileUris_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of fileUris. + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The fileUris at the given index. + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the fileUris at the given index. + */ + public com.google.protobuf.ByteString getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The fileUris to set. + * @return This builder for chaining. + */ + public Builder setFileUris(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The fileUris to add. + * @return This builder for chaining. + */ + public Builder addFileUris(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The fileUris to add. + * @return This builder for chaining. + */ + public Builder addAllFileUris(java.lang.Iterable values) { + ensureFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fileUris_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearFileUris() { + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the fileUris to add. + * @return This builder for chaining. + */ + public Builder addFileUrisBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList archiveUris_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureArchiveUrisIsMutable() { + if (!((bitField0_ & 0x00000010) != 0)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(archiveUris_); + bitField0_ |= 0x00000010; + } + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the archiveUris. + */ + public com.google.protobuf.ProtocolStringList getArchiveUrisList() { + return archiveUris_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of archiveUris. + */ + public int getArchiveUrisCount() { + return archiveUris_.size(); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The archiveUris at the given index. + */ + public java.lang.String getArchiveUris(int index) { + return archiveUris_.get(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the archiveUris at the given index. + */ + public com.google.protobuf.ByteString getArchiveUrisBytes(int index) { + return archiveUris_.getByteString(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The archiveUris to set. + * @return This builder for chaining. + */ + public Builder setArchiveUris(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchiveUrisIsMutable(); + archiveUris_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The archiveUris to add. + * @return This builder for chaining. + */ + public Builder addArchiveUris(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchiveUrisIsMutable(); + archiveUris_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The archiveUris to add. + * @return This builder for chaining. + */ + public Builder addAllArchiveUris(java.lang.Iterable values) { + ensureArchiveUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, archiveUris_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearArchiveUris() { + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the archiveUris to add. + * @return This builder for chaining. + */ + public Builder addArchiveUrisBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureArchiveUrisIsMutable(); + archiveUris_.add(value); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.PySparkBatch) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.PySparkBatch) + private static final com.google.cloud.dataproc.v1.PySparkBatch DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.PySparkBatch(); + } + + public static com.google.cloud.dataproc.v1.PySparkBatch getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PySparkBatch parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PySparkBatch(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.PySparkBatch getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkBatchOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkBatchOrBuilder.java new file mode 100644 index 00000000..b65ba0cb --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkBatchOrBuilder.java @@ -0,0 +1,335 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +public interface PySparkBatchOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.PySparkBatch) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The HCFS URI of the main Python file to use as the Spark driver. Must
+   * be a .py file.
+   * 
+ * + * string main_python_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The mainPythonFileUri. + */ + java.lang.String getMainPythonFileUri(); + /** + * + * + *
+   * Required. The HCFS URI of the main Python file to use as the Spark driver. Must
+   * be a .py file.
+   * 
+ * + * string main_python_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for mainPythonFileUri. + */ + com.google.protobuf.ByteString getMainPythonFileUriBytes(); + + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the args. + */ + java.util.List getArgsList(); + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of args. + */ + int getArgsCount(); + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The args at the given index. + */ + java.lang.String getArgs(int index); + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the args at the given index. + */ + com.google.protobuf.ByteString getArgsBytes(int index); + + /** + * + * + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+   * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the pythonFileUris. + */ + java.util.List getPythonFileUrisList(); + /** + * + * + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+   * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of pythonFileUris. + */ + int getPythonFileUrisCount(); + /** + * + * + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+   * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The pythonFileUris at the given index. + */ + java.lang.String getPythonFileUris(int index); + /** + * + * + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: `.py`, `.egg`, and `.zip`.
+   * 
+ * + * repeated string python_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the pythonFileUris at the given index. + */ + com.google.protobuf.ByteString getPythonFileUrisBytes(int index); + + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the jarFileUris. + */ + java.util.List getJarFileUrisList(); + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of jarFileUris. + */ + int getJarFileUrisCount(); + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The jarFileUris at the given index. + */ + java.lang.String getJarFileUris(int index); + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the jarFileUris at the given index. + */ + com.google.protobuf.ByteString getJarFileUrisBytes(int index); + + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the fileUris. + */ + java.util.List getFileUrisList(); + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of fileUris. + */ + int getFileUrisCount(); + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The fileUris at the given index. + */ + java.lang.String getFileUris(int index); + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the fileUris at the given index. + */ + com.google.protobuf.ByteString getFileUrisBytes(int index); + + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the archiveUris. + */ + java.util.List getArchiveUrisList(); + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of archiveUris. + */ + int getArchiveUrisCount(); + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The archiveUris at the given index. + */ + java.lang.String getArchiveUris(int index); + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the archiveUris at the given index. + */ + com.google.protobuf.ByteString getArchiveUrisBytes(int index); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkBatch.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkBatch.java new file mode 100644 index 00000000..0f00a9d6 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkBatch.java @@ -0,0 +1,2198 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A configuration for running an [Apache Spark](https://spark.apache.org/)
+ * batch workload.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.SparkBatch} + */ +public final class SparkBatch extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.SparkBatch) + SparkBatchOrBuilder { + private static final long serialVersionUID = 0L; + // Use SparkBatch.newBuilder() to construct. + private SparkBatch(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SparkBatch() { + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new SparkBatch(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private SparkBatch( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + driverCase_ = 1; + driver_ = s; + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + driverCase_ = 2; + driver_ = s; + break; + } + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + args_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + args_.add(s); + break; + } + case 34: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000002) != 0)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + jarFileUris_.add(s); + break; + } + case 42: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000004) != 0)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000004; + } + fileUris_.add(s); + break; + } + case 50: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000008) != 0)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000008; + } + archiveUris_.add(s); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + args_ = args_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000002) != 0)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000004) != 0)) { + fileUris_ = fileUris_.getUnmodifiableView(); + } + if 
(((mutable_bitField0_ & 0x00000008) != 0)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.SparkBatch.class, + com.google.cloud.dataproc.v1.SparkBatch.Builder.class); + } + + private int driverCase_ = 0; + private java.lang.Object driver_; + + public enum DriverCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + MAIN_JAR_FILE_URI(1), + MAIN_CLASS(2), + DRIVER_NOT_SET(0); + private final int value; + + private DriverCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static DriverCase valueOf(int value) { + return forNumber(value); + } + + public static DriverCase forNumber(int value) { + switch (value) { + case 1: + return MAIN_JAR_FILE_URI; + case 2: + return MAIN_CLASS; + case 0: + return DRIVER_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public DriverCase getDriverCase() { + return DriverCase.forNumber(driverCase_); + } + + public static final int MAIN_JAR_FILE_URI_FIELD_NUMBER = 1; + /** + * + * + *
+   * Optional. The HCFS URI of the jar file that contains the main class.
+   * 
+ * + * string main_jar_file_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the mainJarFileUri field is set. + */ + public boolean hasMainJarFileUri() { + return driverCase_ == 1; + } + /** + * + * + *
+   * Optional. The HCFS URI of the jar file that contains the main class.
+   * 
+ * + * string main_jar_file_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The mainJarFileUri. + */ + public java.lang.String getMainJarFileUri() { + java.lang.Object ref = ""; + if (driverCase_ == 1) { + ref = driver_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (driverCase_ == 1) { + driver_ = s; + } + return s; + } + } + /** + * + * + *
+   * Optional. The HCFS URI of the jar file that contains the main class.
+   * 
+ * + * string main_jar_file_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for mainJarFileUri. + */ + public com.google.protobuf.ByteString getMainJarFileUriBytes() { + java.lang.Object ref = ""; + if (driverCase_ == 1) { + ref = driver_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (driverCase_ == 1) { + driver_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int MAIN_CLASS_FIELD_NUMBER = 2; + /** + * + * + *
+   * Optional. The name of the driver main class. The jar file that contains the class
+   * must be in the classpath or specified in `jar_file_uris`.
+   * 
+ * + * string main_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the mainClass field is set. + */ + public boolean hasMainClass() { + return driverCase_ == 2; + } + /** + * + * + *
+   * Optional. The name of the driver main class. The jar file that contains the class
+   * must be in the classpath or specified in `jar_file_uris`.
+   * 
+ * + * string main_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The mainClass. + */ + public java.lang.String getMainClass() { + java.lang.Object ref = ""; + if (driverCase_ == 2) { + ref = driver_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (driverCase_ == 2) { + driver_ = s; + } + return s; + } + } + /** + * + * + *
+   * Optional. The name of the driver main class. The jar file that contains the class
+   * must be in the classpath or specified in `jar_file_uris`.
+   * 
+ * + * string main_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for mainClass. + */ + public com.google.protobuf.ByteString getMainClassBytes() { + java.lang.Object ref = ""; + if (driverCase_ == 2) { + ref = driver_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (driverCase_ == 2) { + driver_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ARGS_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList args_; + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
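+   * <p>A sketch with hypothetical argument values; Spark configuration such as
+   * {@code --conf} is intentionally left to batch properties:
+   * <pre>{@code
+   * SparkBatch.Builder builder =
+   *     SparkBatch.newBuilder()
+   *         .addArgs("--input=gs://my-bucket/input")
+   *         .addArgs("--dry-run");
+   * }</pre>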
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the args. + */ + public com.google.protobuf.ProtocolStringList getArgsList() { + return args_; + } + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of args. + */ + public int getArgsCount() { + return args_.size(); + } + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The args at the given index. + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the args at the given index. + */ + public com.google.protobuf.ByteString getArgsBytes(int index) { + return args_.getByteString(index); + } + + public static final int JAR_FILE_URIS_FIELD_NUMBER = 4; + private com.google.protobuf.LazyStringList jarFileUris_; + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
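+   * <p>For example (hypothetical builder and URI), a dependency jar added here becomes
+   * visible on both the driver and executor classpaths:
+   * <pre>{@code
+   * builder.addJarFileUris("gs://my-bucket/libs/parser-dep.jar");
+   * }</pre>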
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the jarFileUris. + */ + public com.google.protobuf.ProtocolStringList getJarFileUrisList() { + return jarFileUris_; + } + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of jarFileUris. + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The jarFileUris at the given index. + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the jarFileUris at the given index. + */ + public com.google.protobuf.ByteString getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + + public static final int FILE_URIS_FIELD_NUMBER = 5; + private com.google.protobuf.LazyStringList fileUris_; + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
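+   * <p>For example (hypothetical builder and URI), a staged file is addressable by its
+   * base name from each executor's working directory:
+   * <pre>{@code
+   * builder.addFileUris("gs://my-bucket/lookup.csv");  // readable by executors as "lookup.csv"
+   * }</pre>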
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the fileUris. + */ + public com.google.protobuf.ProtocolStringList getFileUrisList() { + return fileUris_; + } + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of fileUris. + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The fileUris at the given index. + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the fileUris at the given index. + */ + public com.google.protobuf.ByteString getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + + public static final int ARCHIVE_URIS_FIELD_NUMBER = 6; + private com.google.protobuf.LazyStringList archiveUris_; + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
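+   * <p>For example (hypothetical builder and URI), a supported archive is extracted,
+   * not just copied, into each executor's working directory:
+   * <pre>{@code
+   * builder.addArchiveUris("gs://my-bucket/python-deps.tar.gz");
+   * }</pre>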
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the archiveUris. + */ + public com.google.protobuf.ProtocolStringList getArchiveUrisList() { + return archiveUris_; + } + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of archiveUris. + */ + public int getArchiveUrisCount() { + return archiveUris_.size(); + } + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The archiveUris at the given index. + */ + public java.lang.String getArchiveUris(int index) { + return archiveUris_.get(index); + } + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the archiveUris at the given index. + */ + public com.google.protobuf.ByteString getArchiveUrisBytes(int index) { + return archiveUris_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (driverCase_ == 1) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, driver_); + } + if (driverCase_ == 2) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, driver_); + } + for (int i = 0; i < args_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, args_.getRaw(i)); + } + for (int i = 0; i < jarFileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, jarFileUris_.getRaw(i)); + } + for (int i = 0; i < fileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, fileUris_.getRaw(i)); + } + for (int i = 0; i < archiveUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, archiveUris_.getRaw(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (driverCase_ == 1) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, driver_); + } + if (driverCase_ == 2) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, driver_); + } + { + int dataSize = 0; + for (int i = 0; i < args_.size(); i++) { + dataSize += computeStringSizeNoTag(args_.getRaw(i)); + } + size += dataSize; + size += 1 * getArgsList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < jarFileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(jarFileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getJarFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < fileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(fileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < archiveUris_.size(); i++) { + dataSize += computeStringSizeNoTag(archiveUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getArchiveUrisList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.SparkBatch)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.SparkBatch other = (com.google.cloud.dataproc.v1.SparkBatch) obj; + + if (!getArgsList().equals(other.getArgsList())) return false; + if (!getJarFileUrisList().equals(other.getJarFileUrisList())) return false; + if (!getFileUrisList().equals(other.getFileUrisList())) return false; + if (!getArchiveUrisList().equals(other.getArchiveUrisList())) return false; + if (!getDriverCase().equals(other.getDriverCase())) return false; + switch (driverCase_) { + case 1: + if (!getMainJarFileUri().equals(other.getMainJarFileUri())) return false; + 
break; + case 2: + if (!getMainClass().equals(other.getMainClass())) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getArgsCount() > 0) { + hash = (37 * hash) + ARGS_FIELD_NUMBER; + hash = (53 * hash) + getArgsList().hashCode(); + } + if (getJarFileUrisCount() > 0) { + hash = (37 * hash) + JAR_FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getJarFileUrisList().hashCode(); + } + if (getFileUrisCount() > 0) { + hash = (37 * hash) + FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getFileUrisList().hashCode(); + } + if (getArchiveUrisCount() > 0) { + hash = (37 * hash) + ARCHIVE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getArchiveUrisList().hashCode(); + } + switch (driverCase_) { + case 1: + hash = (37 * hash) + MAIN_JAR_FILE_URI_FIELD_NUMBER; + hash = (53 * hash) + getMainJarFileUri().hashCode(); + break; + case 2: + hash = (37 * hash) + MAIN_CLASS_FIELD_NUMBER; + hash = (53 * hash) + getMainClass().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.SparkBatch parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.SparkBatch parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkBatch parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.SparkBatch parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkBatch parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.SparkBatch parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkBatch parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.SparkBatch parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkBatch parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.SparkBatch 
parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkBatch parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.SparkBatch parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.SparkBatch prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A configuration for running an [Apache Spark](http://spark.apache.org/)
+   * batch workload.
+   * 
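+   * <p>A minimal end-to-end sketch with hypothetical URIs and arguments: build a
+   * message, serialize it, and parse it back.
+   * <pre>{@code
+   * SparkBatch batch =
+   *     SparkBatch.newBuilder()
+   *         .setMainJarFileUri("gs://my-bucket/my-spark-job.jar")
+   *         .addArgs("--mode=daily")
+   *         .build();
+   * // parseFrom throws InvalidProtocolBufferException on malformed input
+   * SparkBatch reparsed = SparkBatch.parseFrom(batch.toByteArray());
+   * }</pre>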
+ * + * Protobuf type {@code google.cloud.dataproc.v1.SparkBatch} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.SparkBatch) + com.google.cloud.dataproc.v1.SparkBatchOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.SparkBatch.class, + com.google.cloud.dataproc.v1.SparkBatch.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.SparkBatch.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + driverCase_ = 0; + driver_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkBatch_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkBatch getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.SparkBatch.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkBatch build() { + com.google.cloud.dataproc.v1.SparkBatch result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkBatch buildPartial() { + com.google.cloud.dataproc.v1.SparkBatch result = + new com.google.cloud.dataproc.v1.SparkBatch(this); + int from_bitField0_ = bitField0_; + if (driverCase_ == 1) { + result.driver_ = driver_; + } + if (driverCase_ == 2) { + result.driver_ = driver_; + } + if (((bitField0_ & 0x00000001) != 0)) { + args_ = args_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.args_ = args_; + if (((bitField0_ & 0x00000002) != 0)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.jarFileUris_ = jarFileUris_; + if (((bitField0_ & 0x00000004) != 0)) { + fileUris_ = fileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.fileUris_ = fileUris_; + if (((bitField0_ & 0x00000008) != 0)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.archiveUris_ = 
archiveUris_; + result.driverCase_ = driverCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.SparkBatch) { + return mergeFrom((com.google.cloud.dataproc.v1.SparkBatch) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.SparkBatch other) { + if (other == com.google.cloud.dataproc.v1.SparkBatch.getDefaultInstance()) return this; + if (!other.args_.isEmpty()) { + if (args_.isEmpty()) { + args_ = other.args_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureArgsIsMutable(); + args_.addAll(other.args_); + } + onChanged(); + } + if (!other.jarFileUris_.isEmpty()) { + if (jarFileUris_.isEmpty()) { + jarFileUris_ = other.jarFileUris_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureJarFileUrisIsMutable(); + jarFileUris_.addAll(other.jarFileUris_); + } + onChanged(); + } + if (!other.fileUris_.isEmpty()) { + if (fileUris_.isEmpty()) { + fileUris_ = other.fileUris_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureFileUrisIsMutable(); + fileUris_.addAll(other.fileUris_); + } + onChanged(); + } + if (!other.archiveUris_.isEmpty()) { + if (archiveUris_.isEmpty()) { + archiveUris_ = other.archiveUris_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureArchiveUrisIsMutable(); + archiveUris_.addAll(other.archiveUris_); + } + onChanged(); + } + switch (other.getDriverCase()) { + case MAIN_JAR_FILE_URI: + { + driverCase_ = 1; + driver_ = other.driver_; + onChanged(); + break; + } + case MAIN_CLASS: + { + driverCase_ = 2; + driver_ = other.driver_; + onChanged(); + break; + } + case DRIVER_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.SparkBatch parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.SparkBatch) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int driverCase_ = 0; 
+ private java.lang.Object driver_; + + public DriverCase getDriverCase() { + return DriverCase.forNumber(driverCase_); + } + + public Builder clearDriver() { + driverCase_ = 0; + driver_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + * + * + *
+     * Optional. The HCFS URI of the jar file that contains the main class.
+     * 
+ * + * string main_jar_file_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the mainJarFileUri field is set. + */ + @java.lang.Override + public boolean hasMainJarFileUri() { + return driverCase_ == 1; + } + /** + * + * + *
+     * Optional. The HCFS URI of the jar file that contains the main class.
+     * 
+ * + * string main_jar_file_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The mainJarFileUri. + */ + @java.lang.Override + public java.lang.String getMainJarFileUri() { + java.lang.Object ref = ""; + if (driverCase_ == 1) { + ref = driver_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (driverCase_ == 1) { + driver_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. The HCFS URI of the jar file that contains the main class.
+     * 
+ * + * string main_jar_file_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for mainJarFileUri. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMainJarFileUriBytes() { + java.lang.Object ref = ""; + if (driverCase_ == 1) { + ref = driver_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (driverCase_ == 1) { + driver_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. The HCFS URI of the jar file that contains the main class.
+     * 
+ * + * string main_jar_file_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The mainJarFileUri to set. + * @return This builder for chaining. + */ + public Builder setMainJarFileUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + driverCase_ = 1; + driver_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The HCFS URI of the jar file that contains the main class.
+     * 
+ * + * string main_jar_file_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearMainJarFileUri() { + if (driverCase_ == 1) { + driverCase_ = 0; + driver_ = null; + onChanged(); + } + return this; + } + /** + * + * + *
+     * Optional. The HCFS URI of the jar file that contains the main class.
+     * 
+ * + * string main_jar_file_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for mainJarFileUri to set. + * @return This builder for chaining. + */ + public Builder setMainJarFileUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + driverCase_ = 1; + driver_ = value; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The name of the driver main class. The jar file that contains the class
+     * must be in the classpath or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the mainClass field is set. + */ + @java.lang.Override + public boolean hasMainClass() { + return driverCase_ == 2; + } + /** + * + * + *
+     * Optional. The name of the driver main class. The jar file that contains the class
+     * must be in the classpath or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The mainClass. + */ + @java.lang.Override + public java.lang.String getMainClass() { + java.lang.Object ref = ""; + if (driverCase_ == 2) { + ref = driver_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (driverCase_ == 2) { + driver_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. The name of the driver main class. The jar file that contains the class
+     * must be in the classpath or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for mainClass. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMainClassBytes() { + java.lang.Object ref = ""; + if (driverCase_ == 2) { + ref = driver_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (driverCase_ == 2) { + driver_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. The name of the driver main class. The jar file that contains the class
+     * must be in the classpath or specified in `jar_file_uris`.
+     * 
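+     * <p>Because {@code main_jar_file_uri} and {@code main_class} share the {@code driver}
+     * oneof, the last setter wins; a sketch with hypothetical values:
+     * <pre>{@code
+     * SparkBatch.Builder b = SparkBatch.newBuilder();
+     * b.setMainJarFileUri("gs://my-bucket/job.jar");
+     * b.setMainClass("com.example.MyDriver");  // driver case is now MAIN_CLASS; the URI is cleared
+     * }</pre>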
+ * + * string main_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The mainClass to set. + * @return This builder for chaining. + */ + public Builder setMainClass(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + driverCase_ = 2; + driver_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The name of the driver main class. The jar file that contains the class
+     * must be in the classpath or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearMainClass() { + if (driverCase_ == 2) { + driverCase_ = 0; + driver_ = null; + onChanged(); + } + return this; + } + /** + * + * + *
+     * Optional. The name of the driver main class. The jar file that contains the class
+     * must be in the classpath or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for mainClass to set. + * @return This builder for chaining. + */ + public Builder setMainClassBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + driverCase_ = 2; + driver_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList args_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureArgsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + args_ = new com.google.protobuf.LazyStringArrayList(args_); + bitField0_ |= 0x00000001; + } + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the args. + */ + public com.google.protobuf.ProtocolStringList getArgsList() { + return args_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of args. + */ + public int getArgsCount() { + return args_.size(); + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The args at the given index. + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the args at the given index. + */ + public com.google.protobuf.ByteString getArgsBytes(int index) { + return args_.getByteString(index); + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The args to set. + * @return This builder for chaining. + */ + public Builder setArgs(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The args to add. + * @return This builder for chaining. + */ + public Builder addArgs(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The args to add. + * @return This builder for chaining. + */ + public Builder addAllArgs(java.lang.Iterable values) { + ensureArgsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, args_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearArgs() { + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the args to add. + * @return This builder for chaining. + */ + public Builder addArgsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList jarFileUris_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureJarFileUrisIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(jarFileUris_); + bitField0_ |= 0x00000002; + } + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the jarFileUris. + */ + public com.google.protobuf.ProtocolStringList getJarFileUrisList() { + return jarFileUris_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of jarFileUris. + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The jarFileUris at the given index. + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the jarFileUris at the given index. + */ + public com.google.protobuf.ByteString getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The jarFileUris to set. + * @return This builder for chaining. + */ + public Builder setJarFileUris(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The jarFileUris to add. + * @return This builder for chaining. + */ + public Builder addJarFileUris(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The jarFileUris to add. + * @return This builder for chaining. + */ + public Builder addAllJarFileUris(java.lang.Iterable values) { + ensureJarFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, jarFileUris_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearJarFileUris() { + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to add to the classpath of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the jarFileUris to add. + * @return This builder for chaining. + */ + public Builder addJarFileUrisBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList fileUris_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureFileUrisIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(fileUris_); + bitField0_ |= 0x00000004; + } + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the fileUris. + */ + public com.google.protobuf.ProtocolStringList getFileUrisList() { + return fileUris_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of fileUris. + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The fileUris at the given index. + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the fileUris at the given index. + */ + public com.google.protobuf.ByteString getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The fileUris to set. + * @return This builder for chaining. + */ + public Builder setFileUris(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The fileUris to add. + * @return This builder for chaining. + */ + public Builder addFileUris(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The fileUris to add. + * @return This builder for chaining. + */ + public Builder addAllFileUris(java.lang.Iterable values) { + ensureFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fileUris_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearFileUris() { + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the fileUris to add. + * @return This builder for chaining. + */ + public Builder addFileUrisBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList archiveUris_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureArchiveUrisIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(archiveUris_); + bitField0_ |= 0x00000008; + } + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the archiveUris. + */ + public com.google.protobuf.ProtocolStringList getArchiveUrisList() { + return archiveUris_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of archiveUris. + */ + public int getArchiveUrisCount() { + return archiveUris_.size(); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The archiveUris at the given index. + */ + public java.lang.String getArchiveUris(int index) { + return archiveUris_.get(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the archiveUris at the given index. + */ + public com.google.protobuf.ByteString getArchiveUrisBytes(int index) { + return archiveUris_.getByteString(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The archiveUris to set. + * @return This builder for chaining. + */ + public Builder setArchiveUris(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchiveUrisIsMutable(); + archiveUris_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The archiveUris to add. + * @return This builder for chaining. + */ + public Builder addArchiveUris(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchiveUrisIsMutable(); + archiveUris_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The archiveUris to add. + * @return This builder for chaining. + */ + public Builder addAllArchiveUris(java.lang.Iterable values) { + ensureArchiveUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, archiveUris_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearArchiveUris() { + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the archiveUris to add. + * @return This builder for chaining. + */ + public Builder addArchiveUrisBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureArchiveUrisIsMutable(); + archiveUris_.add(value); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.SparkBatch) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SparkBatch) + private static final com.google.cloud.dataproc.v1.SparkBatch DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.SparkBatch(); + } + + public static com.google.cloud.dataproc.v1.SparkBatch getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SparkBatch parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SparkBatch(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkBatch getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkBatchOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkBatchOrBuilder.java new file mode 100644 index 00000000..8ce57703 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkBatchOrBuilder.java @@ -0,0 +1,332 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +public interface SparkBatchOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.SparkBatch) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. The HCFS URI of the jar file that contains the main class.
+   * 
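+   * <p>A dispatch sketch over the containing oneof (the {@code sparkBatch} variable and
+   * both submit helpers are hypothetical):
+   * <pre>{@code
+   * switch (sparkBatch.getDriverCase()) {
+   *   case MAIN_JAR_FILE_URI:
+   *     submitJar(sparkBatch.getMainJarFileUri());
+   *     break;
+   *   case MAIN_CLASS:
+   *     submitClass(sparkBatch.getMainClass());
+   *     break;
+   *   case DRIVER_NOT_SET:
+   *     break;
+   * }
+   * }</pre>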
+ * + * string main_jar_file_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the mainJarFileUri field is set. + */ + boolean hasMainJarFileUri(); + /** + * + * + *
+   * Optional. The HCFS URI of the jar file that contains the main class.
+   * 
+ * + * string main_jar_file_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The mainJarFileUri. + */ + java.lang.String getMainJarFileUri(); + /** + * + * + *
+   * Optional. The HCFS URI of the jar file that contains the main class.
+   * 
+ * + * string main_jar_file_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for mainJarFileUri. + */ + com.google.protobuf.ByteString getMainJarFileUriBytes(); + + /** + * + * + *
+   * Optional. The name of the driver main class. The jar file that contains the class
+   * must be in the classpath or specified in `jar_file_uris`.
+   * 
+ * + * string main_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the mainClass field is set. + */ + boolean hasMainClass(); + /** + * + * + *
+   * Optional. The name of the driver main class. The jar file that contains the class
+   * must be in the classpath or specified in `jar_file_uris`.
+   * 
+ * + * string main_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The mainClass. + */ + java.lang.String getMainClass(); + /** + * + * + *
+   * Optional. The name of the driver main class. The jar file that contains the class
+   * must be in the classpath or specified in `jar_file_uris`.
+   * 
+ * + * string main_class = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for mainClass. + */ + com.google.protobuf.ByteString getMainClassBytes(); + + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the args. + */ + java.util.List getArgsList(); + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of args. + */ + int getArgsCount(); + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The args at the given index. + */ + java.lang.String getArgs(int index); + /** + * + * + *
+   * Optional. The arguments to pass to the driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the args at the given index. + */ + com.google.protobuf.ByteString getArgsBytes(int index); + + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the jarFileUris. + */ + java.util.List getJarFileUrisList(); + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of jarFileUris. + */ + int getJarFileUrisCount(); + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The jarFileUris at the given index. + */ + java.lang.String getJarFileUris(int index); + /** + * + * + *
+   * Optional. HCFS URIs of jar files to add to the classpath of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the jarFileUris at the given index. + */ + com.google.protobuf.ByteString getJarFileUrisBytes(int index); + + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
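+   *
+   * For example, assuming the standard generated builder method (the URI is
+   * hypothetical):
+   * <pre>{@code
+   * builder.addFileUris("gs://my-bucket/config/app.conf");
+   * // The file can then be read by its bare name from the executor's
+   * // working directory, e.g. new java.io.File("app.conf").
+   * }</pre>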
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the fileUris. + */ + java.util.List getFileUrisList(); + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of fileUris. + */ + int getFileUrisCount(); + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The fileUris at the given index. + */ + java.lang.String getFileUris(int index); + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the fileUris at the given index. + */ + com.google.protobuf.ByteString getFileUrisBytes(int index); + + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
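+   *
+   * For example, assuming the standard generated builder method (the URI is
+   * hypothetical):
+   * <pre>{@code
+   * // The archive is unpacked into each executor's working directory.
+   * builder.addArchiveUris("gs://my-bucket/deps/resources.tar.gz");
+   * }</pre>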
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the archiveUris. + */ + java.util.List getArchiveUrisList(); + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of archiveUris. + */ + int getArchiveUrisCount(); + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The archiveUris at the given index. + */ + java.lang.String getArchiveUris(int index); + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the archiveUris at the given index. + */ + com.google.protobuf.ByteString getArchiveUrisBytes(int index); + + public com.google.cloud.dataproc.v1.SparkBatch.DriverCase getDriverCase(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRBatch.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRBatch.java new file mode 100644 index 00000000..f694d617 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRBatch.java @@ -0,0 +1,1541 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A configuration for running an
+ * [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html)
+ * batch workload.
+ * 
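+ *
+ * A minimal construction sketch, for illustration only (paths and argument values
+ * are hypothetical):
+ * <pre>{@code
+ * SparkRBatch batch =
+ *     SparkRBatch.newBuilder()
+ *         .setMainRFileUri("gs://my-bucket/jobs/analysis.R")
+ *         .addArgs("--rows=1000")
+ *         .addFileUris("gs://my-bucket/data/lookup.csv")
+ *         .build();
+ * }</pre>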
+ * + * Protobuf type {@code google.cloud.dataproc.v1.SparkRBatch} + */ +public final class SparkRBatch extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.SparkRBatch) + SparkRBatchOrBuilder { + private static final long serialVersionUID = 0L; + // Use SparkRBatch.newBuilder() to construct. + private SparkRBatch(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SparkRBatch() { + mainRFileUri_ = ""; + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new SparkRBatch(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private SparkRBatch( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + mainRFileUri_ = s; + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + args_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + args_.add(s); + break; + } + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000002) != 0)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + fileUris_.add(s); + break; + } + case 34: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000004) != 0)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000004; + } + archiveUris_.add(s); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + args_ = args_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000002) != 0)) { + fileUris_ = fileUris_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000004) != 0)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkRBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkRBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.SparkRBatch.class, + com.google.cloud.dataproc.v1.SparkRBatch.Builder.class); + } + + public static final int MAIN_R_FILE_URI_FIELD_NUMBER = 1; + private volatile java.lang.Object mainRFileUri_; + /** + * + * + *
+   * Required. The HCFS URI of the main R file to use as the driver.
+   * Must be a `.R` or `.r` file.
+   * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The mainRFileUri. + */ + @java.lang.Override + public java.lang.String getMainRFileUri() { + java.lang.Object ref = mainRFileUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + mainRFileUri_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The HCFS URI of the main R file to use as the driver.
+   * Must be a `.R` or `.r` file.
+   * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for mainRFileUri. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMainRFileUriBytes() { + java.lang.Object ref = mainRFileUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + mainRFileUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ARGS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList args_; + /** + * + * + *
+   * Optional. The arguments to pass to the Spark driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the args. + */ + public com.google.protobuf.ProtocolStringList getArgsList() { + return args_; + } + /** + * + * + *
+   * Optional. The arguments to pass to the Spark driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of args. + */ + public int getArgsCount() { + return args_.size(); + } + /** + * + * + *
+   * Optional. The arguments to pass to the Spark driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The args at the given index. + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + * + * + *
+   * Optional. The arguments to pass to the Spark driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the args at the given index. + */ + public com.google.protobuf.ByteString getArgsBytes(int index) { + return args_.getByteString(index); + } + + public static final int FILE_URIS_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList fileUris_; + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the fileUris. + */ + public com.google.protobuf.ProtocolStringList getFileUrisList() { + return fileUris_; + } + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of fileUris. + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The fileUris at the given index. + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the fileUris at the given index. + */ + public com.google.protobuf.ByteString getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + + public static final int ARCHIVE_URIS_FIELD_NUMBER = 4; + private com.google.protobuf.LazyStringList archiveUris_; + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the archiveUris. + */ + public com.google.protobuf.ProtocolStringList getArchiveUrisList() { + return archiveUris_; + } + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of archiveUris. + */ + public int getArchiveUrisCount() { + return archiveUris_.size(); + } + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The archiveUris at the given index. + */ + public java.lang.String getArchiveUris(int index) { + return archiveUris_.get(index); + } + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the archiveUris at the given index. + */ + public com.google.protobuf.ByteString getArchiveUrisBytes(int index) { + return archiveUris_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(mainRFileUri_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, mainRFileUri_); + } + for (int i = 0; i < args_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, args_.getRaw(i)); + } + for (int i = 0; i < fileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, fileUris_.getRaw(i)); + } + for (int i = 0; i < archiveUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, archiveUris_.getRaw(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(mainRFileUri_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, mainRFileUri_); + } + { + int dataSize = 0; + for (int i = 0; i < args_.size(); i++) { + dataSize += computeStringSizeNoTag(args_.getRaw(i)); + } + size += dataSize; + size += 1 * getArgsList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < fileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(fileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < archiveUris_.size(); i++) { + dataSize += computeStringSizeNoTag(archiveUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getArchiveUrisList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.SparkRBatch)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.SparkRBatch other = (com.google.cloud.dataproc.v1.SparkRBatch) obj; + + if (!getMainRFileUri().equals(other.getMainRFileUri())) return false; + if (!getArgsList().equals(other.getArgsList())) return false; + if (!getFileUrisList().equals(other.getFileUrisList())) return false; + if (!getArchiveUrisList().equals(other.getArchiveUrisList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + MAIN_R_FILE_URI_FIELD_NUMBER; + hash = (53 * hash) + getMainRFileUri().hashCode(); + if (getArgsCount() > 0) { + hash = (37 * hash) + ARGS_FIELD_NUMBER; + hash = (53 * hash) + getArgsList().hashCode(); + } + if (getFileUrisCount() > 0) { + hash = (37 * hash) + FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getFileUrisList().hashCode(); + } + if 
(getArchiveUrisCount() > 0) { + hash = (37 * hash) + ARCHIVE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getArchiveUrisList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.SparkRBatch parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.SparkRBatch parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkRBatch parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.SparkRBatch parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkRBatch parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.SparkRBatch parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkRBatch parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.SparkRBatch parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkRBatch parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.SparkRBatch parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkRBatch parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.SparkRBatch parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.SparkRBatch prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A configuration for running an
+   * [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html)
+   * batch workload.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.SparkRBatch} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.SparkRBatch) + com.google.cloud.dataproc.v1.SparkRBatchOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkRBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkRBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.SparkRBatch.class, + com.google.cloud.dataproc.v1.SparkRBatch.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.SparkRBatch.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + mainRFileUri_ = ""; + + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkRBatch_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkRBatch getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.SparkRBatch.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkRBatch build() { + com.google.cloud.dataproc.v1.SparkRBatch result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkRBatch buildPartial() { + com.google.cloud.dataproc.v1.SparkRBatch result = + new com.google.cloud.dataproc.v1.SparkRBatch(this); + int from_bitField0_ = bitField0_; + result.mainRFileUri_ = mainRFileUri_; + if (((bitField0_ & 0x00000001) != 0)) { + args_ = args_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.args_ = args_; + if (((bitField0_ & 0x00000002) != 0)) { + fileUris_ = fileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.fileUris_ = fileUris_; + if (((bitField0_ & 0x00000004) != 0)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.archiveUris_ = archiveUris_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder 
clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.SparkRBatch) { + return mergeFrom((com.google.cloud.dataproc.v1.SparkRBatch) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.SparkRBatch other) { + if (other == com.google.cloud.dataproc.v1.SparkRBatch.getDefaultInstance()) return this; + if (!other.getMainRFileUri().isEmpty()) { + mainRFileUri_ = other.mainRFileUri_; + onChanged(); + } + if (!other.args_.isEmpty()) { + if (args_.isEmpty()) { + args_ = other.args_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureArgsIsMutable(); + args_.addAll(other.args_); + } + onChanged(); + } + if (!other.fileUris_.isEmpty()) { + if (fileUris_.isEmpty()) { + fileUris_ = other.fileUris_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureFileUrisIsMutable(); + fileUris_.addAll(other.fileUris_); + } + onChanged(); + } + if (!other.archiveUris_.isEmpty()) { + if (archiveUris_.isEmpty()) { + archiveUris_ = other.archiveUris_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureArchiveUrisIsMutable(); + archiveUris_.addAll(other.archiveUris_); + } + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.SparkRBatch parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.SparkRBatch) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.lang.Object mainRFileUri_ = ""; + /** + * + * + *
+     * Required. The HCFS URI of the main R file to use as the driver.
+     * Must be a `.R` or `.r` file.
+     * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The mainRFileUri. + */ + public java.lang.String getMainRFileUri() { + java.lang.Object ref = mainRFileUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + mainRFileUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The HCFS URI of the main R file to use as the driver.
+     * Must be a `.R` or `.r` file.
+     * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for mainRFileUri. + */ + public com.google.protobuf.ByteString getMainRFileUriBytes() { + java.lang.Object ref = mainRFileUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + mainRFileUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The HCFS URI of the main R file to use as the driver.
+     * Must be a `.R` or `.r` file.
+     * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The mainRFileUri to set. + * @return This builder for chaining. + */ + public Builder setMainRFileUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + mainRFileUri_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The HCFS URI of the main R file to use as the driver.
+     * Must be a `.R` or `.r` file.
+     * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearMainRFileUri() { + + mainRFileUri_ = getDefaultInstance().getMainRFileUri(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The HCFS URI of the main R file to use as the driver.
+     * Must be a `.R` or `.r` file.
+     * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for mainRFileUri to set. + * @return This builder for chaining. + */ + public Builder setMainRFileUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + mainRFileUri_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList args_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureArgsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + args_ = new com.google.protobuf.LazyStringArrayList(args_); + bitField0_ |= 0x00000001; + } + } + /** + * + * + *
+     * Optional. The arguments to pass to the Spark driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the args. + */ + public com.google.protobuf.ProtocolStringList getArgsList() { + return args_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. The arguments to pass to the Spark driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of args. + */ + public int getArgsCount() { + return args_.size(); + } + /** + * + * + *
+     * Optional. The arguments to pass to the Spark driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The args at the given index. + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + * + * + *
+     * Optional. The arguments to pass to the Spark driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the args at the given index. + */ + public com.google.protobuf.ByteString getArgsBytes(int index) { + return args_.getByteString(index); + } + /** + * + * + *
+     * Optional. The arguments to pass to the Spark driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The args to set. + * @return This builder for chaining. + */ + public Builder setArgs(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the Spark driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The args to add. + * @return This builder for chaining. + */ + public Builder addArgs(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the Spark driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The args to add. + * @return This builder for chaining. + */ + public Builder addAllArgs(java.lang.Iterable values) { + ensureArgsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, args_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the Spark driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearArgs() { + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the Spark driver. Do not include arguments
+     * that can be set as batch properties, such as `--conf`, since a collision
+     * can occur that causes an incorrect batch submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the args to add. + * @return This builder for chaining. + */ + public Builder addArgsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList fileUris_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureFileUrisIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(fileUris_); + bitField0_ |= 0x00000002; + } + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the fileUris. + */ + public com.google.protobuf.ProtocolStringList getFileUrisList() { + return fileUris_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of fileUris. + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The fileUris at the given index. + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the fileUris at the given index. + */ + public com.google.protobuf.ByteString getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The fileUris to set. + * @return This builder for chaining. + */ + public Builder setFileUris(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The fileUris to add. + * @return This builder for chaining. + */ + public Builder addFileUris(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The fileUris to add. + * @return This builder for chaining. + */ + public Builder addAllFileUris(java.lang.Iterable values) { + ensureFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fileUris_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearFileUris() { + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be placed in the working directory of
+     * each executor.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the fileUris to add. + * @return This builder for chaining. + */ + public Builder addFileUrisBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList archiveUris_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureArchiveUrisIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(archiveUris_); + bitField0_ |= 0x00000004; + } + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the archiveUris. + */ + public com.google.protobuf.ProtocolStringList getArchiveUrisList() { + return archiveUris_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of archiveUris. + */ + public int getArchiveUrisCount() { + return archiveUris_.size(); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The archiveUris at the given index. + */ + public java.lang.String getArchiveUris(int index) { + return archiveUris_.get(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the archiveUris at the given index. + */ + public com.google.protobuf.ByteString getArchiveUrisBytes(int index) { + return archiveUris_.getByteString(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The archiveUris to set. + * @return This builder for chaining. + */ + public Builder setArchiveUris(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchiveUrisIsMutable(); + archiveUris_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The archiveUris to add. + * @return This builder for chaining. + */ + public Builder addArchiveUris(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchiveUrisIsMutable(); + archiveUris_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The archiveUris to add. + * @return This builder for chaining. + */ + public Builder addAllArchiveUris(java.lang.Iterable values) { + ensureArchiveUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, archiveUris_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearArchiveUris() { + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types:
+     * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the archiveUris to add. + * @return This builder for chaining. + */ + public Builder addArchiveUrisBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureArchiveUrisIsMutable(); + archiveUris_.add(value); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.SparkRBatch) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SparkRBatch) + private static final com.google.cloud.dataproc.v1.SparkRBatch DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.SparkRBatch(); + } + + public static com.google.cloud.dataproc.v1.SparkRBatch getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SparkRBatch parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SparkRBatch(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkRBatch getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRBatchOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRBatchOrBuilder.java new file mode 100644 index 00000000..1083a669 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRBatchOrBuilder.java @@ -0,0 +1,225 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +public interface SparkRBatchOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.SparkRBatch) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The HCFS URI of the main R file to use as the driver.
+   * Must be a `.R` or `.r` file.
+   * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The mainRFileUri. + */ + java.lang.String getMainRFileUri(); + /** + * + * + *
+   * Required. The HCFS URI of the main R file to use as the driver.
+   * Must be a `.R` or `.r` file.
+   * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for mainRFileUri. + */ + com.google.protobuf.ByteString getMainRFileUriBytes(); + + /** + * + * + *
+   * Optional. The arguments to pass to the Spark driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the args. + */ + java.util.List getArgsList(); + /** + * + * + *
+   * Optional. The arguments to pass to the Spark driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of args. + */ + int getArgsCount(); + /** + * + * + *
+   * Optional. The arguments to pass to the Spark driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The args at the given index. + */ + java.lang.String getArgs(int index); + /** + * + * + *
+   * Optional. The arguments to pass to the Spark driver. Do not include arguments
+   * that can be set as batch properties, such as `--conf`, since a collision
+   * can occur that causes an incorrect batch submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the args at the given index. + */ + com.google.protobuf.ByteString getArgsBytes(int index); + + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the fileUris. + */ + java.util.List getFileUrisList(); + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of fileUris. + */ + int getFileUrisCount(); + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The fileUris at the given index. + */ + java.lang.String getFileUris(int index); + /** + * + * + *
+   * Optional. HCFS URIs of files to be placed in the working directory of
+   * each executor.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the fileUris at the given index. + */ + com.google.protobuf.ByteString getFileUrisBytes(int index); + + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the archiveUris. + */ + java.util.List getArchiveUrisList(); + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of archiveUris. + */ + int getArchiveUrisCount(); + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The archiveUris at the given index. + */ + java.lang.String getArchiveUris(int index); + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types:
+   * `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the archiveUris at the given index. + */ + com.google.protobuf.ByteString getArchiveUrisBytes(int index); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlBatch.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlBatch.java new file mode 100644 index 00000000..170cbce5 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlBatch.java @@ -0,0 +1,1275 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A configuration for running
+ * [Apache Spark SQL](https://spark.apache.org/sql/) queries as a batch workload.
+ * 
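+ *
+ * A minimal construction sketch, for illustration only (the script URI and variable
+ * values are hypothetical; {@code setQueryFileUri} and {@code putQueryVariables} are
+ * the standard generated setters for the fields below):
+ * <pre>{@code
+ * SparkSqlBatch batch =
+ *     SparkSqlBatch.newBuilder()
+ *         .setQueryFileUri("gs://my-bucket/sql/daily_report.sql")
+ *         .putQueryVariables("run_date", "2021-10-21")
+ *         .build();
+ * }</pre>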
+ * + * Protobuf type {@code google.cloud.dataproc.v1.SparkSqlBatch} + */ +public final class SparkSqlBatch extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.SparkSqlBatch) + SparkSqlBatchOrBuilder { + private static final long serialVersionUID = 0L; + // Use SparkSqlBatch.newBuilder() to construct. + private SparkSqlBatch(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SparkSqlBatch() { + queryFileUri_ = ""; + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new SparkSqlBatch(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private SparkSqlBatch( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + queryFileUri_ = s; + break; + } + case 18: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + queryVariables_ = + com.google.protobuf.MapField.newMapField( + QueryVariablesDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000001; + } + com.google.protobuf.MapEntry queryVariables__ = + input.readMessage( + QueryVariablesDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + queryVariables_ + .getMutableMap() + .put(queryVariables__.getKey(), queryVariables__.getValue()); + break; + } + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000002) != 0)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + jarFileUris_.add(s); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) != 0)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkSqlBatch_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField(int number) { + switch (number) { + case 2: + return internalGetQueryVariables(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkSqlBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.SparkSqlBatch.class, + com.google.cloud.dataproc.v1.SparkSqlBatch.Builder.class); + } + + public static final int QUERY_FILE_URI_FIELD_NUMBER = 1; + private volatile java.lang.Object queryFileUri_; + /** + * + * + *
+   * Required. The HCFS URI of the script that contains Spark SQL queries to execute.
+   * 
+ * + * string query_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The queryFileUri. + */ + @java.lang.Override + public java.lang.String getQueryFileUri() { + java.lang.Object ref = queryFileUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + queryFileUri_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The HCFS URI of the script that contains Spark SQL queries to execute.
+   * 
+ * + * string query_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for queryFileUri. + */ + @java.lang.Override + public com.google.protobuf.ByteString getQueryFileUriBytes() { + java.lang.Object ref = queryFileUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + queryFileUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int QUERY_VARIABLES_FIELD_NUMBER = 2; + + private static final class QueryVariablesDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkSqlBatch_QueryVariablesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + private com.google.protobuf.MapField queryVariables_; + + private com.google.protobuf.MapField + internalGetQueryVariables() { + if (queryVariables_ == null) { + return com.google.protobuf.MapField.emptyMapField( + QueryVariablesDefaultEntryHolder.defaultEntry); + } + return queryVariables_; + } + + public int getQueryVariablesCount() { + return internalGetQueryVariables().getMap().size(); + } + /** + * + * + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: `SET name="value";`).
+   * 
+ * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsQueryVariables(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + return internalGetQueryVariables().getMap().containsKey(key); + } + /** Use {@link #getQueryVariablesMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getQueryVariables() { + return getQueryVariablesMap(); + } + /** + * + * + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: `SET name="value";`).
+   * 
+ * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getQueryVariablesMap() { + return internalGetQueryVariables().getMap(); + } + /** + * + * + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: `SET name="value";`).
+   * 
+ * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getQueryVariablesOrDefault( + java.lang.String key, java.lang.String defaultValue) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetQueryVariables().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: `SET name="value";`).
+   * 
+ * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getQueryVariablesOrThrow(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetQueryVariables().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int JAR_FILE_URIS_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList jarFileUris_; + /** + * + * + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the jarFileUris. + */ + public com.google.protobuf.ProtocolStringList getJarFileUrisList() { + return jarFileUris_; + } + /** + * + * + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of jarFileUris. + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + * + * + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The jarFileUris at the given index. + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + * + * + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the jarFileUris at the given index. + */ + public com.google.protobuf.ByteString getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(queryFileUri_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, queryFileUri_); + } + com.google.protobuf.GeneratedMessageV3.serializeStringMapTo( + output, internalGetQueryVariables(), QueryVariablesDefaultEntryHolder.defaultEntry, 2); + for (int i = 0; i < jarFileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, jarFileUris_.getRaw(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(queryFileUri_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, queryFileUri_); + } + for (java.util.Map.Entry entry : + internalGetQueryVariables().getMap().entrySet()) { + com.google.protobuf.MapEntry queryVariables__ = + QueryVariablesDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, queryVariables__); + } + { + int dataSize = 0; + for (int i = 0; i < jarFileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(jarFileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getJarFileUrisList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.SparkSqlBatch)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.SparkSqlBatch other = + (com.google.cloud.dataproc.v1.SparkSqlBatch) obj; + + if (!getQueryFileUri().equals(other.getQueryFileUri())) return false; + if (!internalGetQueryVariables().equals(other.internalGetQueryVariables())) return false; + if (!getJarFileUrisList().equals(other.getJarFileUrisList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + QUERY_FILE_URI_FIELD_NUMBER; + hash = (53 * hash) + getQueryFileUri().hashCode(); + if (!internalGetQueryVariables().getMap().isEmpty()) { + hash = (37 * hash) + QUERY_VARIABLES_FIELD_NUMBER; + hash = (53 * hash) + internalGetQueryVariables().hashCode(); + } + if (getJarFileUrisCount() > 0) { + hash = (37 * hash) + JAR_FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getJarFileUrisList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + 
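+  // Illustrative sketch (not generated code): one way to construct this message
+  // with the builder surface defined below. The bucket, file, and variable
+  // names are hypothetical placeholders.
+  //
+  //   SparkSqlBatch sparkSqlBatch =
+  //       SparkSqlBatch.newBuilder()
+  //           .setQueryFileUri("gs://my-bucket/spark-sql/queries.sql")
+  //           .putQueryVariables("run_date", "2021-10-21")
+  //           .addJarFileUris("gs://my-bucket/jars/my-udfs.jar")
+  //           .build();
+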
public static com.google.cloud.dataproc.v1.SparkSqlBatch parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.SparkSqlBatch parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkSqlBatch parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.SparkSqlBatch parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkSqlBatch parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.SparkSqlBatch parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkSqlBatch parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.SparkSqlBatch parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkSqlBatch parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.SparkSqlBatch parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkSqlBatch parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.SparkSqlBatch parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.SparkSqlBatch prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A configuration for running
+   * [Apache Spark SQL](http://spark.apache.org/sql/) queries as a batch workload.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.SparkSqlBatch} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.SparkSqlBatch) + com.google.cloud.dataproc.v1.SparkSqlBatchOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkSqlBatch_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField(int number) { + switch (number) { + case 2: + return internalGetQueryVariables(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField(int number) { + switch (number) { + case 2: + return internalGetMutableQueryVariables(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkSqlBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.SparkSqlBatch.class, + com.google.cloud.dataproc.v1.SparkSqlBatch.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.SparkSqlBatch.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + queryFileUri_ = ""; + + internalGetMutableQueryVariables().clear(); + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.BatchesProto + .internal_static_google_cloud_dataproc_v1_SparkSqlBatch_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkSqlBatch getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.SparkSqlBatch.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkSqlBatch build() { + com.google.cloud.dataproc.v1.SparkSqlBatch result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkSqlBatch buildPartial() { + com.google.cloud.dataproc.v1.SparkSqlBatch result = + new com.google.cloud.dataproc.v1.SparkSqlBatch(this); + int from_bitField0_ = bitField0_; + result.queryFileUri_ = queryFileUri_; + result.queryVariables_ = internalGetQueryVariables(); + result.queryVariables_.makeImmutable(); + if (((bitField0_ & 0x00000002) != 0)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.jarFileUris_ = jarFileUris_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder 
setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.SparkSqlBatch) { + return mergeFrom((com.google.cloud.dataproc.v1.SparkSqlBatch) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.SparkSqlBatch other) { + if (other == com.google.cloud.dataproc.v1.SparkSqlBatch.getDefaultInstance()) return this; + if (!other.getQueryFileUri().isEmpty()) { + queryFileUri_ = other.queryFileUri_; + onChanged(); + } + internalGetMutableQueryVariables().mergeFrom(other.internalGetQueryVariables()); + if (!other.jarFileUris_.isEmpty()) { + if (jarFileUris_.isEmpty()) { + jarFileUris_ = other.jarFileUris_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureJarFileUrisIsMutable(); + jarFileUris_.addAll(other.jarFileUris_); + } + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.SparkSqlBatch parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.SparkSqlBatch) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.lang.Object queryFileUri_ = ""; + /** + * + * + *
+     * Required. The HCFS URI of the script that contains Spark SQL queries to execute.
+     * 
+ * + * string query_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The queryFileUri. + */ + public java.lang.String getQueryFileUri() { + java.lang.Object ref = queryFileUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + queryFileUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The HCFS URI of the script that contains Spark SQL queries to execute.
+     * 
+ * + * string query_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for queryFileUri. + */ + public com.google.protobuf.ByteString getQueryFileUriBytes() { + java.lang.Object ref = queryFileUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + queryFileUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The HCFS URI of the script that contains Spark SQL queries to execute.
+     * 
+ * + * string query_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The queryFileUri to set. + * @return This builder for chaining. + */ + public Builder setQueryFileUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + queryFileUri_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The HCFS URI of the script that contains Spark SQL queries to execute.
+     * 
+ * + * string query_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearQueryFileUri() { + + queryFileUri_ = getDefaultInstance().getQueryFileUri(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The HCFS URI of the script that contains Spark SQL queries to execute.
+     * 
+ * + * string query_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for queryFileUri to set. + * @return This builder for chaining. + */ + public Builder setQueryFileUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + queryFileUri_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.MapField queryVariables_; + + private com.google.protobuf.MapField + internalGetQueryVariables() { + if (queryVariables_ == null) { + return com.google.protobuf.MapField.emptyMapField( + QueryVariablesDefaultEntryHolder.defaultEntry); + } + return queryVariables_; + } + + private com.google.protobuf.MapField + internalGetMutableQueryVariables() { + onChanged(); + ; + if (queryVariables_ == null) { + queryVariables_ = + com.google.protobuf.MapField.newMapField(QueryVariablesDefaultEntryHolder.defaultEntry); + } + if (!queryVariables_.isMutable()) { + queryVariables_ = queryVariables_.copy(); + } + return queryVariables_; + } + + public int getQueryVariablesCount() { + return internalGetQueryVariables().getMap().size(); + } + /** + * + * + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: `SET name="value";`).
+     * 
+ * + * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsQueryVariables(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + return internalGetQueryVariables().getMap().containsKey(key); + } + /** Use {@link #getQueryVariablesMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getQueryVariables() { + return getQueryVariablesMap(); + } + /** + * + * + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: `SET name="value";`).
+     * 
+ * + * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getQueryVariablesMap() { + return internalGetQueryVariables().getMap(); + } + /** + * + * + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: `SET name="value";`).
+     * 
+ * + * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getQueryVariablesOrDefault( + java.lang.String key, java.lang.String defaultValue) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetQueryVariables().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: `SET name="value";`).
+     * 
+ * + * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getQueryVariablesOrThrow(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetQueryVariables().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearQueryVariables() { + internalGetMutableQueryVariables().getMutableMap().clear(); + return this; + } + /** + * + * + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: `SET name="value";`).
+     * 
+ * + * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeQueryVariables(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + internalGetMutableQueryVariables().getMutableMap().remove(key); + return this; + } + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableQueryVariables() { + return internalGetMutableQueryVariables().getMutableMap(); + } + /** + * + * + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: `SET name="value";`).
+     * 
+ * + * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putQueryVariables(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + if (value == null) { + throw new java.lang.NullPointerException(); + } + internalGetMutableQueryVariables().getMutableMap().put(key, value); + return this; + } + /** + * + * + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: `SET name="value";`).
+     * 
+ * + * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllQueryVariables(java.util.Map values) { + internalGetMutableQueryVariables().getMutableMap().putAll(values); + return this; + } + + private com.google.protobuf.LazyStringList jarFileUris_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureJarFileUrisIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(jarFileUris_); + bitField0_ |= 0x00000002; + } + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the jarFileUris. + */ + public com.google.protobuf.ProtocolStringList getJarFileUrisList() { + return jarFileUris_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of jarFileUris. + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The jarFileUris at the given index. + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the jarFileUris at the given index. + */ + public com.google.protobuf.ByteString getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The jarFileUris to set. + * @return This builder for chaining. + */ + public Builder setJarFileUris(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The jarFileUris to add. + * @return This builder for chaining. + */ + public Builder addJarFileUris(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The jarFileUris to add. + * @return This builder for chaining. + */ + public Builder addAllJarFileUris(java.lang.Iterable values) { + ensureJarFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, jarFileUris_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearJarFileUris() { + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the jarFileUris to add. + * @return This builder for chaining. + */ + public Builder addJarFileUrisBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.SparkSqlBatch) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SparkSqlBatch) + private static final com.google.cloud.dataproc.v1.SparkSqlBatch DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.SparkSqlBatch(); + } + + public static com.google.cloud.dataproc.v1.SparkSqlBatch getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SparkSqlBatch parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SparkSqlBatch(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkSqlBatch getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlBatchOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlBatchOrBuilder.java new file mode 100644 index 00000000..ed8a233c --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlBatchOrBuilder.java @@ -0,0 +1,165 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/batches.proto + +package com.google.cloud.dataproc.v1; + +public interface SparkSqlBatchOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.SparkSqlBatch) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The HCFS URI of the script that contains Spark SQL queries to execute.
+   * 
+ * + * string query_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The queryFileUri. + */ + java.lang.String getQueryFileUri(); + /** + * + * + *
+   * Required. The HCFS URI of the script that contains Spark SQL queries to execute.
+   * 
+ * + * string query_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for queryFileUri. + */ + com.google.protobuf.ByteString getQueryFileUriBytes(); + + /** + * + * + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: `SET name="value";`).
+   * 
+ * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getQueryVariablesCount(); + /** + * + * + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: `SET name="value";`).
+   * 
+ * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsQueryVariables(java.lang.String key); + /** Use {@link #getQueryVariablesMap()} instead. */ + @java.lang.Deprecated + java.util.Map getQueryVariables(); + /** + * + * + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: `SET name="value";`).
+   * 
+ * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getQueryVariablesMap(); + /** + * + * + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: `SET name="value";`).
+   * 
+ * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getQueryVariablesOrDefault(java.lang.String key, java.lang.String defaultValue); + /** + * + * + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: `SET name="value";`).
+   * 
+ * + * map<string, string> query_variables = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getQueryVariablesOrThrow(java.lang.String key); + + /** + * + * + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the jarFileUris. + */ + java.util.List getJarFileUrisList(); + /** + * + * + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of jarFileUris. + */ + int getJarFileUrisCount(); + /** + * + * + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The jarFileUris at the given index. + */ + java.lang.String getJarFileUris(int index); + /** + * + * + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the jarFileUris at the given index. + */ + com.google.protobuf.ByteString getJarFileUrisBytes(int index); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/batches.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/batches.proto new file mode 100644 index 00000000..eafb4e35 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/batches.proto @@ -0,0 +1,372 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.dataproc.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/dataproc/v1/shared.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; +option java_multiple_files = true; +option java_outer_classname = "BatchesProto"; +option java_package = "com.google.cloud.dataproc.v1"; + +// The BatchController provides methods to manage batch workloads. +service BatchController { + option (google.api.default_host) = "dataproc.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a batch workload that executes asynchronously. + rpc CreateBatch(CreateBatchRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/locations/*}/batches" + body: "batch" + }; + option (google.api.method_signature) = "parent,batch,batch_id"; + option (google.longrunning.operation_info) = { + response_type: "Batch" + metadata_type: "google.cloud.dataproc.v1.BatchOperationMetadata" + }; + } + + // Gets the batch workload resource representation. + rpc GetBatch(GetBatchRequest) returns (Batch) { + option (google.api.http) = { + get: "/v1/{name=projects/*/locations/*/batches/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists batch workloads. + rpc ListBatches(ListBatchesRequest) returns (ListBatchesResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/locations/*}/batches" + }; + option (google.api.method_signature) = "parent"; + } + + // Deletes the batch workload resource. If the batch is not in terminal state, + // the delete fails and the response returns `FAILED_PRECONDITION`. + rpc DeleteBatch(DeleteBatchRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/locations/*/batches/*}" + }; + option (google.api.method_signature) = "name"; + } +} + +// A request to create a batch workload. +message CreateBatchRequest { + // Required. 
The parent resource where this batch will be created.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      child_type: "dataproc.googleapis.com/Batch"
+    }
+  ];
+
+  // Required. The batch to create.
+  Batch batch = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The ID to use for the batch, which will become the final component of
+  // the batch's resource name.
+  //
+  // This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`.
+  string batch_id = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A unique ID used to identify the request. If the service
+  // receives two
+  // [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s
+  // with the same request_id, the second request is ignored and the
+  // Operation that corresponds to the first Batch created and stored
+  // in the backend is returned.
+  //
+  // Recommendation: Set this value to a
+  // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+  //
+  // The value must contain only letters (a-z, A-Z), numbers (0-9),
+  // underscores (_), and hyphens (-). The maximum length is 40 characters.
+  string request_id = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A request to get the resource representation for a batch workload.
+message GetBatchRequest {
+  // Required. The name of the batch to retrieve.
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "dataproc.googleapis.com/Batch"
+    }
+  ];
+}
+
+// A request to list batch workloads in a project.
+message ListBatchesRequest {
+  // Required. The parent, which owns this collection of batches.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      child_type: "dataproc.googleapis.com/Batch"
+    }
+  ];
+
+  // Optional. The maximum number of batches to return in each response.
+  // The service may return fewer than this value.
+  // The default page size is 20; the maximum page size is 1000.
+  int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A page token received from a previous `ListBatches` call.
+  // Provide this token to retrieve the subsequent page.
+  string page_token = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A list of batch workloads.
+message ListBatchesResponse {
+  // The batches from the specified collection.
+  repeated Batch batches = 1;
+
+  // A token, which can be sent as `page_token` to retrieve the next page.
+  // If this field is omitted, there are no subsequent pages.
+  string next_page_token = 2;
+}
+
+// A request to delete a batch workload.
+message DeleteBatchRequest {
+  // Required. The name of the batch resource to delete.
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "dataproc.googleapis.com/Batch"
+    }
+  ];
+}
+
+// A representation of a batch workload in the service.
+message Batch {
+  option (google.api.resource) = {
+    type: "dataproc.googleapis.com/Batch"
+    pattern: "projects/{project}/locations/{location}/batches/{batch}"
+  };
+
+  // Historical state information.
+  message StateHistory {
+    // Output only. The state of the batch at this point in history.
+    State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Details about the state at this point in history.
+    string state_message = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. The time when the batch entered the historical state.
+    google.protobuf.Timestamp state_start_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+  }
+
+  // The batch state.
+  enum State {
+    // The batch state is unknown.
+    STATE_UNSPECIFIED = 0;
+
+    // The batch is created before running.
+    PENDING = 1;
+
+    // The batch is running.
+    RUNNING = 2;
+
+    // The batch is cancelling.
+    CANCELLING = 3;
+
+    // The batch cancellation was successful.
+    CANCELLED = 4;
+
+    // The batch completed successfully.
+    SUCCEEDED = 5;
+
+    // The batch is no longer running due to an error.
+    FAILED = 6;
+  }
+
+  // Output only. The resource name of the batch.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A batch UUID (Unique Universal Identifier). The service
+  // generates this value when it creates the batch.
+  string uuid = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The time when the batch was created.
+  google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // The application/framework-specific portion of the batch configuration.
+  oneof batch_config {
+    // Optional. PySpark batch config.
+    PySparkBatch pyspark_batch = 4 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Spark batch config.
+    SparkBatch spark_batch = 5 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. SparkR batch config.
+    SparkRBatch spark_r_batch = 6 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. SparkSql batch config.
+    SparkSqlBatch spark_sql_batch = 7 [(google.api.field_behavior) = OPTIONAL];
+  }
+
+  // Output only. Runtime information about batch execution.
+  RuntimeInfo runtime_info = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The state of the batch.
+  State state = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Batch state details, such as a failure
+  // description if the state is `FAILED`.
+  string state_message = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The time when the batch entered a current state.
+  google.protobuf.Timestamp state_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The email address of the user who created the batch.
+  string creator = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. The labels to associate with this batch.
+  // Label **keys** must contain 1 to 63 characters, and must conform to
+  // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+  // Label **values** may be empty, but, if present, must contain 1 to 63
+  // characters, and must conform to [RFC
+  // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+  // associated with a batch.
+  map<string, string> labels = 13 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Runtime configuration for the batch execution.
+  RuntimeConfig runtime_config = 14 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Environment configuration for the batch execution.
+  EnvironmentConfig environment_config = 15 [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The resource name of the operation associated with this batch.
+  string operation = 16 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Historical state information for the batch.
+  repeated StateHistory state_history = 17 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// A configuration for running an
+// [Apache
+// PySpark](https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html)
+// batch workload.
+message PySparkBatch {
+  // Required. The HCFS URI of the main Python file to use as the Spark driver. Must
+  // be a .py file.
+  string main_python_file_uri = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The arguments to pass to the driver. Do not include arguments
+  // that can be set as batch properties, such as `--conf`, since a collision
+  // can occur that causes an incorrect batch submission.
+  repeated string args = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS file URIs of Python files to pass to the PySpark
+  // framework. Supported file types: `.py`, `.egg`, and `.zip`.
+  repeated string python_file_uris = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of jar files to add to the classpath of the
+  // Spark driver and tasks.
+  repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of files to be placed in the working directory of
+  // each executor.
+  repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of archives to be extracted into the working directory
+  // of each executor. Supported file types:
+  // `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+  repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A configuration for running an [Apache Spark](http://spark.apache.org/)
+// batch workload.
+message SparkBatch {
+  // The specification of the main method to call to drive the Spark
+  // workload. Specify either the jar file that contains the main class or the
+  // main class name. To pass both a main jar and a main class in that jar, add
+  // the jar to `jar_file_uris`, and then specify the main class
+  // name in `main_class`.
+  oneof driver {
+    // Optional. The HCFS URI of the jar file that contains the main class.
+    string main_jar_file_uri = 1 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. The name of the driver main class. The jar file that contains the class
+    // must be in the classpath or specified in `jar_file_uris`.
+    string main_class = 2 [(google.api.field_behavior) = OPTIONAL];
+  }
+
+  // Optional. The arguments to pass to the driver. Do not include arguments
+  // that can be set as batch properties, such as `--conf`, since a collision
+  // can occur that causes an incorrect batch submission.
+  repeated string args = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of jar files to add to the classpath of the
+  // Spark driver and tasks.
+  repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of files to be placed in the working directory of
+  // each executor.
+  repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of archives to be extracted into the working directory
+  // of each executor. Supported file types:
+  // `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+  repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A configuration for running an
+// [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html)
+// batch workload.
+message SparkRBatch {
+  // Required. The HCFS URI of the main R file to use as the driver.
+  // Must be a `.R` or `.r` file.
+  string main_r_file_uri = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The arguments to pass to the Spark driver. Do not include arguments
+  // that can be set as batch properties, such as `--conf`, since a collision
+  // can occur that causes an incorrect batch submission.
+  repeated string args = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of files to be placed in the working directory of
+  // each executor.
+  repeated string file_uris = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of archives to be extracted into the working directory
+  // of each executor. Supported file types:
+  // `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
+  repeated string archive_uris = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A configuration for running
+// [Apache Spark SQL](http://spark.apache.org/sql/) queries as a batch workload.
+message SparkSqlBatch {
+  // Required. The HCFS URI of the script that contains Spark SQL queries to execute.
+  string query_file_uri = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Mapping of query variable names to values (equivalent to the
+  // Spark SQL command: `SET name="value";`).
+  map<string, string> query_variables = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+  repeated string jar_file_uris = 3 [(google.api.field_behavior) = OPTIONAL];
+}
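
A minimal usage sketch for the service added above, assuming the standard Java GAPIC surface this patch generates (a `createBatchAsync` overload flattened from the `"parent,batch,batch_id"` method signature, returning an OperationFuture that resolves to the created Batch). The project, location, bucket, query file, and batch ID below are hypothetical placeholders.

    import com.google.cloud.dataproc.v1.Batch;
    import com.google.cloud.dataproc.v1.BatchControllerClient;
    import com.google.cloud.dataproc.v1.SparkSqlBatch;

    public class CreateSparkSqlBatch {
      public static void main(String[] args) throws Exception {
        // Hypothetical parent resource: projects/{project}/locations/{location}.
        String parent = "projects/my-project/locations/us-central1";

        // Spark SQL batch config; the script URI and variables are placeholders.
        SparkSqlBatch sparkSqlBatch =
            SparkSqlBatch.newBuilder()
                .setQueryFileUri("gs://my-bucket/spark-sql/queries.sql")
                .putQueryVariables("run_date", "2021-10-21")
                .build();

        // Wrap the config in a Batch; spark_sql_batch is one arm of batch_config.
        Batch batch = Batch.newBuilder().setSparkSqlBatch(sparkSqlBatch).build();

        try (BatchControllerClient client = BatchControllerClient.create()) {
          // CreateBatch is a long-running operation; get() waits for completion.
          Batch created = client.createBatchAsync(parent, batch, "my-batch-id").get();
          System.out.println("Created batch: " + created.getName());
        }
      }
    }

Because `spark_sql_batch` is one arm of the `batch_config` oneof, setting it on the Batch builder clears any previously set PySpark, Spark, or SparkR config.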