diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClient.java new file mode 100644 index 0000000000..3ed7b778ec --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClient.java @@ -0,0 +1,749 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1.stub.BigQueryWriteStub; +import com.google.cloud.bigquery.storage.v1.stub.BigQueryWriteStubSettings; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: BigQuery Write API. + * + *

The Write API can be used to write data to BigQuery. + * + *

For supplementary information about the Write API, see: + * https://cloud.google.com/bigquery/docs/write-api + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

<pre>{@code
+ * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+ *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
+ *   WriteStream writeStream = WriteStream.newBuilder().build();
+ *   WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
+ * }
+ * }</pre>
+ * + *
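
The empty WriteStream above yields a stream of the default type. Where explicit commit semantics are wanted, the type can be set on the builder first; a minimal sketch, assuming the v1 `WriteStream.Type` enum:

```java
// Hedged sketch: create a PENDING stream, whose appends stay invisible until the
// stream is finalized and committed (see FinalizeWriteStream/BatchCommitWriteStreams).
try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
  TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
  WriteStream pending =
      WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build();
  WriteStream created = bigQueryWriteClient.createWriteStream(parent, pending);
}
```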

Note: close() needs to be called on the BigQueryWriteClient object to clean up resources such + * as threads. In the example above, try-with-resources is used, which automatically calls close(). + * + *

The surface of this class includes several types of Java methods for each of the API's + * methods: + * + *

    + *
<ol>
+ *   <li>A "flattened" method. With this type of method, the fields of the request type have been
+ *       converted into function parameters. It may be the case that not all fields are available as
+ *       parameters, and not every API method will have a flattened method entry point.
+ *   <li>A "request object" method. This type of method only takes one parameter, a request object,
+ *       which must be constructed before the call. Not every API method will have a request object
+ *       method.
+ *   <li>A "callable" method. This type of method takes no parameters and returns an immutable API
+ *       callable object, which can be used to initiate calls to the service; the sketch after this
+ *       list shows all three styles side by side.
+ * </ol>
+ *
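
For orientation, a minimal sketch of how the three method styles relate for one RPC, using only calls that appear elsewhere in this class:

```java
try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
  TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
  WriteStream stream = WriteStream.newBuilder().build();

  // 1. Flattened method: request fields are passed as parameters.
  WriteStream viaFlattened = client.createWriteStream(parent, stream);

  // 2. Request object method: the request is constructed explicitly.
  CreateWriteStreamRequest request =
      CreateWriteStreamRequest.newBuilder()
          .setParent(parent.toString())
          .setWriteStream(stream)
          .build();
  WriteStream viaRequest = client.createWriteStream(request);

  // 3. Callable method: an immutable callable, usable synchronously (call)
  //    or asynchronously (futureCall).
  WriteStream viaCallable = client.createWriteStreamCallable().call(request);
}
```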

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of BigQueryWriteSettings to + * create(). For example: + * + *

To customize credentials: + * + *

<pre>{@code
+ * BigQueryWriteSettings bigQueryWriteSettings =
+ *     BigQueryWriteSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create(bigQueryWriteSettings);
+ * }</pre>
+ * + *

To customize the endpoint: + * + *

<pre>{@code
+ * BigQueryWriteSettings bigQueryWriteSettings =
+ *     BigQueryWriteSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create(bigQueryWriteSettings);
+ * }</pre>
+ * + *

Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@Generated("by gapic-generator-java") +public class BigQueryWriteClient implements BackgroundResource { + private final BigQueryWriteSettings settings; + private final BigQueryWriteStub stub; + + /** Constructs an instance of BigQueryWriteClient with default settings. */ + public static final BigQueryWriteClient create() throws IOException { + return create(BigQueryWriteSettings.newBuilder().build()); + } + + /** + * Constructs an instance of BigQueryWriteClient, using the given settings. The channels are + * created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final BigQueryWriteClient create(BigQueryWriteSettings settings) + throws IOException { + return new BigQueryWriteClient(settings); + } + + /** + * Constructs an instance of BigQueryWriteClient, using the given stub for making calls. This is + * for advanced usage - prefer using create(BigQueryWriteSettings). + */ + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public static final BigQueryWriteClient create(BigQueryWriteStub stub) { + return new BigQueryWriteClient(stub); + } + + /** + * Constructs an instance of BigQueryWriteClient, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected BigQueryWriteClient(BigQueryWriteSettings settings) throws IOException { + this.settings = settings; + this.stub = ((BigQueryWriteStubSettings) settings.getStubSettings()).createStub(); + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + protected BigQueryWriteClient(BigQueryWriteStub stub) { + this.settings = null; + this.stub = stub; + } + + public final BigQueryWriteSettings getSettings() { + return settings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public BigQueryWriteStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a write stream to the given table. Additionally, every table has a special stream named + * '_default' to which data can be written. This stream doesn't need to be created using + * CreateWriteStream. It is a stream that can be used simultaneously by any number of clients. + * Data written to this stream is considered committed as soon as an acknowledgement is received. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
+   *   WriteStream writeStream = WriteStream.newBuilder().build();
+   *   WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
+   * }
+   * }</pre>
+ * + * @param parent Required. Reference to the table to which the stream belongs, in the format of + * `projects/{project}/datasets/{dataset}/tables/{table}`. + * @param writeStream Required. Stream to be created. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream createWriteStream(TableName parent, WriteStream writeStream) { + CreateWriteStreamRequest request = + CreateWriteStreamRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setWriteStream(writeStream) + .build(); + return createWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a write stream to the given table. Additionally, every table has a special stream named + * '_default' to which data can be written. This stream doesn't need to be created using + * CreateWriteStream. It is a stream that can be used simultaneously by any number of clients. + * Data written to this stream is considered committed as soon as an acknowledgement is received. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString();
+   *   WriteStream writeStream = WriteStream.newBuilder().build();
+   *   WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
+   * }
+   * }</pre>
+ * + * @param parent Required. Reference to the table to which the stream belongs, in the format of + * `projects/{project}/datasets/{dataset}/tables/{table}`. + * @param writeStream Required. Stream to be created. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream createWriteStream(String parent, WriteStream writeStream) { + CreateWriteStreamRequest request = + CreateWriteStreamRequest.newBuilder().setParent(parent).setWriteStream(writeStream).build(); + return createWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a write stream to the given table. Additionally, every table has a special stream named + * '_default' to which data can be written. This stream doesn't need to be created using + * CreateWriteStream. It is a stream that can be used simultaneously by any number of clients. + * Data written to this stream is considered committed as soon as an acknowledgement is received. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   CreateWriteStreamRequest request =
+   *       CreateWriteStreamRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .setWriteStream(WriteStream.newBuilder().build())
+   *           .build();
+   *   WriteStream response = bigQueryWriteClient.createWriteStream(request);
+   * }
+   * }</pre>
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream createWriteStream(CreateWriteStreamRequest request) { + return createWriteStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a write stream to the given table. Additionally, every table has a special stream named + * '_default' to which data can be written. This stream doesn't need to be created using + * CreateWriteStream. It is a stream that can be used simultaneously by any number of clients. + * Data written to this stream is considered committed as soon as an acknowledgement is received. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   CreateWriteStreamRequest request =
+   *       CreateWriteStreamRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .setWriteStream(WriteStream.newBuilder().build())
+   *           .build();
+   *   ApiFuture<WriteStream> future =
+   *       bigQueryWriteClient.createWriteStreamCallable().futureCall(request);
+   *   // Do something.
+   *   WriteStream response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<CreateWriteStreamRequest, WriteStream> createWriteStreamCallable() {
+    return stub.createWriteStreamCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Appends data to the given stream.
+   *

<p>If `offset` is specified, the `offset` is checked against the end of stream. The server
+   * returns `OUT_OF_RANGE` in `AppendRowsResponse` if an attempt is made to append to an offset
+   * beyond the current end of the stream, or `ALREADY_EXISTS` if the user provides an `offset`
+   * that has already been written to. The user can retry with an adjusted offset within the same
+   * RPC connection. If `offset` is not specified, the append happens at the end of the stream.
+   *

The response contains an optional offset at which the append happened. No offset information + * will be returned for appends to a default stream. + * + *

<p>Responses are received in the same order in which requests are sent. There will be one
+   * response for each successfully inserted request. Responses may optionally embed error
+   * information if the originating AppendRequest was not successfully processed.
+   *

<p>The specifics of when successfully appended data is made visible to the table are governed
+   * by the type of stream:
+   *
+   * <ul>
+   *   <li>For COMMITTED streams (which includes the default stream), data is visible immediately
+   *       upon successful append.
+   *   <li>For BUFFERED streams, data is made visible via a subsequent `FlushRows` rpc which
+   *       advances a cursor to a newer offset in the stream.
+   *   <li>For PENDING streams, data is not made visible until the stream itself is finalized (via
+   *       the `FinalizeWriteStream` rpc), and the stream is explicitly committed via the
+   *       `BatchCommitWriteStreams` rpc.
+   * </ul>
+   *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   BidiStream<AppendRowsRequest, AppendRowsResponse> bidiStream =
+   *       bigQueryWriteClient.appendRowsCallable().call();
+   *   AppendRowsRequest request =
+   *       AppendRowsRequest.newBuilder()
+   *           .setWriteStream(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .setOffset(Int64Value.newBuilder().build())
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   bidiStream.send(request);
+   *   for (AppendRowsResponse response : bidiStream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }</pre>
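
Because an append to an already-written offset fails with `ALREADY_EXISTS`, pinning each batch to an explicit offset makes retries idempotent. A minimal sketch (the row payload is elided; `Int64Value.of` is from protobuf):

```java
// Hedged sketch: pin this batch to offset 0 so a duplicate send is rejected with
// ALREADY_EXISTS instead of appending the same rows twice.
AppendRowsRequest pinned =
    AppendRowsRequest.newBuilder()
        .setWriteStream(
            WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
        .setOffset(Int64Value.of(0))
        .build();
bidiStream.send(pinned);
```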
+   */
+  public final BidiStreamingCallable<AppendRowsRequest, AppendRowsResponse> appendRowsCallable() {
+    return stub.appendRowsCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Gets information about a write stream.
+   *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+   *   WriteStream response = bigQueryWriteClient.getWriteStream(name);
+   * }
+   * }</pre>
+ * + * @param name Required. Name of the stream to get, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream getWriteStream(WriteStreamName name) { + GetWriteStreamRequest request = + GetWriteStreamRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a write stream. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString();
+   *   WriteStream response = bigQueryWriteClient.getWriteStream(name);
+   * }
+   * }</pre>
+ * + * @param name Required. Name of the stream to get, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream getWriteStream(String name) { + GetWriteStreamRequest request = GetWriteStreamRequest.newBuilder().setName(name).build(); + return getWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a write stream. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   GetWriteStreamRequest request =
+   *       GetWriteStreamRequest.newBuilder()
+   *           .setName(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .build();
+   *   WriteStream response = bigQueryWriteClient.getWriteStream(request);
+   * }
+   * }</pre>
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream getWriteStream(GetWriteStreamRequest request) { + return getWriteStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a write stream. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   GetWriteStreamRequest request =
+   *       GetWriteStreamRequest.newBuilder()
+   *           .setName(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .build();
+   *   ApiFuture<WriteStream> future =
+   *       bigQueryWriteClient.getWriteStreamCallable().futureCall(request);
+   *   // Do something.
+   *   WriteStream response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<GetWriteStreamRequest, WriteStream> getWriteStreamCallable() {
+    return stub.getWriteStreamCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Finalize a write stream so that no new data can be appended to the stream. Finalize is not
+   * supported on the '_default' stream.
+   *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+   *   FinalizeWriteStreamResponse response = bigQueryWriteClient.finalizeWriteStream(name);
+   * }
+   * }</pre>
+ * + * @param name Required. Name of the stream to finalize, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final FinalizeWriteStreamResponse finalizeWriteStream(WriteStreamName name) { + FinalizeWriteStreamRequest request = + FinalizeWriteStreamRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return finalizeWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString();
+   *   FinalizeWriteStreamResponse response = bigQueryWriteClient.finalizeWriteStream(name);
+   * }
+   * }</pre>
+ * + * @param name Required. Name of the stream to finalize, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final FinalizeWriteStreamResponse finalizeWriteStream(String name) { + FinalizeWriteStreamRequest request = + FinalizeWriteStreamRequest.newBuilder().setName(name).build(); + return finalizeWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FinalizeWriteStreamRequest request =
+   *       FinalizeWriteStreamRequest.newBuilder()
+   *           .setName(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .build();
+   *   FinalizeWriteStreamResponse response = bigQueryWriteClient.finalizeWriteStream(request);
+   * }
+   * }</pre>
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final FinalizeWriteStreamResponse finalizeWriteStream(FinalizeWriteStreamRequest request) { + return finalizeWriteStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FinalizeWriteStreamRequest request =
+   *       FinalizeWriteStreamRequest.newBuilder()
+   *           .setName(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .build();
+   *   ApiFuture<FinalizeWriteStreamResponse> future =
+   *       bigQueryWriteClient.finalizeWriteStreamCallable().futureCall(request);
+   *   // Do something.
+   *   FinalizeWriteStreamResponse response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<FinalizeWriteStreamRequest, FinalizeWriteStreamResponse>
+      finalizeWriteStreamCallable() {
+    return stub.finalizeWriteStreamCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Atomically commits a group of `PENDING` streams that belong to the same `parent` table.
+   *

Streams must be finalized before commit and cannot be committed multiple times. Once a + * stream is committed, data in the stream becomes available for read operations. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String parent = "parent-995424086";
+   *   BatchCommitWriteStreamsResponse response =
+   *       bigQueryWriteClient.batchCommitWriteStreams(parent);
+   * }
+   * }</pre>
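
Combined with finalizeWriteStream above, a typical PENDING-stream sequence looks like the following sketch (resource names are placeholders):

```java
try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
  String parent = "projects/[PROJECT]/datasets/[DATASET]/tables/[TABLE]";
  String streamName = parent + "/streams/[STREAM]";

  // 1. Finalize: the stream stops accepting new appends.
  client.finalizeWriteStream(streamName);

  // 2. Commit: data in the finalized stream becomes readable, atomically.
  BatchCommitWriteStreamsRequest commitRequest =
      BatchCommitWriteStreamsRequest.newBuilder()
          .setParent(parent)
          .addWriteStreams(streamName)
          .build();
  BatchCommitWriteStreamsResponse commitResponse =
      client.batchCommitWriteStreams(commitRequest);
}
```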
+ * + * @param parent Required. Parent table that all the streams should belong to, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCommitWriteStreamsResponse batchCommitWriteStreams(String parent) { + BatchCommitWriteStreamsRequest request = + BatchCommitWriteStreamsRequest.newBuilder().setParent(parent).build(); + return batchCommitWriteStreams(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. + * + *

Streams must be finalized before commit and cannot be committed multiple times. Once a + * stream is committed, data in the stream becomes available for read operations. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   BatchCommitWriteStreamsRequest request =
+   *       BatchCommitWriteStreamsRequest.newBuilder()
+   *           .setParent("parent-995424086")
+   *           .addAllWriteStreams(new ArrayList<String>())
+   *           .build();
+   *   BatchCommitWriteStreamsResponse response =
+   *       bigQueryWriteClient.batchCommitWriteStreams(request);
+   * }
+   * }</pre>
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCommitWriteStreamsResponse batchCommitWriteStreams( + BatchCommitWriteStreamsRequest request) { + return batchCommitWriteStreamsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. + * + *

Streams must be finalized before commit and cannot be committed multiple times. Once a + * stream is committed, data in the stream becomes available for read operations. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   BatchCommitWriteStreamsRequest request =
+   *       BatchCommitWriteStreamsRequest.newBuilder()
+   *           .setParent("parent-995424086")
+   *           .addAllWriteStreams(new ArrayList<String>())
+   *           .build();
+   *   ApiFuture<BatchCommitWriteStreamsResponse> future =
+   *       bigQueryWriteClient.batchCommitWriteStreamsCallable().futureCall(request);
+   *   // Do something.
+   *   BatchCommitWriteStreamsResponse response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse>
+      batchCommitWriteStreamsCallable() {
+    return stub.batchCommitWriteStreamsCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Flushes rows to a BUFFERED stream.
+   *

<p>If users are appending rows to a BUFFERED stream, a flush operation is required in order
+   * for the rows to become available for reading. A Flush operation flushes up to any previously
+   * flushed offset in a BUFFERED stream, to the offset specified in the request.
+   *

Flush is not supported on the _default stream, since it is not BUFFERED. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   WriteStreamName writeStream =
+   *       WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+   *   FlushRowsResponse response = bigQueryWriteClient.flushRows(writeStream);
+   * }
+   * }</pre>
+ * + * @param writeStream Required. The stream that is the target of the flush operation. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final FlushRowsResponse flushRows(WriteStreamName writeStream) { + FlushRowsRequest request = + FlushRowsRequest.newBuilder() + .setWriteStream(writeStream == null ? null : writeStream.toString()) + .build(); + return flushRows(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Flushes rows to a BUFFERED stream. + * + *

<p>If users are appending rows to a BUFFERED stream, a flush operation is required in order
+   * for the rows to become available for reading. A Flush operation flushes up to any previously
+   * flushed offset in a BUFFERED stream, to the offset specified in the request.
+   *

Flush is not supported on the _default stream, since it is not BUFFERED. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String writeStream =
+   *       WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString();
+   *   FlushRowsResponse response = bigQueryWriteClient.flushRows(writeStream);
+   * }
+   * }</pre>
+ * + * @param writeStream Required. The stream that is the target of the flush operation. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final FlushRowsResponse flushRows(String writeStream) { + FlushRowsRequest request = FlushRowsRequest.newBuilder().setWriteStream(writeStream).build(); + return flushRows(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Flushes rows to a BUFFERED stream. + * + *

<p>If users are appending rows to a BUFFERED stream, a flush operation is required in order
+   * for the rows to become available for reading. A Flush operation flushes up to any previously
+   * flushed offset in a BUFFERED stream, to the offset specified in the request.
+   *

Flush is not supported on the _default stream, since it is not BUFFERED. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FlushRowsRequest request =
+   *       FlushRowsRequest.newBuilder()
+   *           .setWriteStream(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .setOffset(Int64Value.newBuilder().build())
+   *           .build();
+   *   FlushRowsResponse response = bigQueryWriteClient.flushRows(request);
+   * }
+   * }</pre>
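
The empty `Int64Value` above targets offset 0; in practice the flush offset is usually one previously acknowledged for an append. A minimal sketch, assuming the offset was captured from an earlier append acknowledgement:

```java
long lastAckedOffset = 41; // hypothetical offset from an earlier append response
FlushRowsRequest flushRequest =
    FlushRowsRequest.newBuilder()
        .setWriteStream(
            WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
        .setOffset(Int64Value.of(lastAckedOffset))
        .build();
// Rows up to and including lastAckedOffset become readable after this call.
FlushRowsResponse flushed = bigQueryWriteClient.flushRows(flushRequest);
```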
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final FlushRowsResponse flushRows(FlushRowsRequest request) { + return flushRowsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Flushes rows to a BUFFERED stream. + * + *

<p>If users are appending rows to a BUFFERED stream, a flush operation is required in order
+   * for the rows to become available for reading. A Flush operation flushes up to any previously
+   * flushed offset in a BUFFERED stream, to the offset specified in the request.
+   *

Flush is not supported on the _default stream, since it is not BUFFERED. + * + *

Sample code: + * + *

<pre>{@code
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FlushRowsRequest request =
+   *       FlushRowsRequest.newBuilder()
+   *           .setWriteStream(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .setOffset(Int64Value.newBuilder().build())
+   *           .build();
+   *   ApiFuture<FlushRowsResponse> future =
+   *       bigQueryWriteClient.flushRowsCallable().futureCall(request);
+   *   // Do something.
+   *   FlushRowsResponse response = future.get();
+   * }
+   * }</pre>
+ */ + public final UnaryCallable flushRowsCallable() { + return stub.flushRowsCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteSettings.java new file mode 100644 index 0000000000..1a3ec3ceec --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteSettings.java @@ -0,0 +1,239 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1.stub.BigQueryWriteStubSettings; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link BigQueryWriteClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
<ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the total timeout of createWriteStream to 30 seconds: + * + *

<pre>{@code
+ * BigQueryWriteSettings.Builder bigQueryWriteSettingsBuilder = BigQueryWriteSettings.newBuilder();
+ * bigQueryWriteSettingsBuilder
+ *     .createWriteStreamSettings()
+ *     .setRetrySettings(
+ *         bigQueryWriteSettingsBuilder
+ *             .createWriteStreamSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setTotalTimeout(Duration.ofSeconds(30))
+ *             .build());
+ * BigQueryWriteSettings bigQueryWriteSettings = bigQueryWriteSettingsBuilder.build();
+ * }</pre>
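
To apply one policy across every unary method instead of repeating the pattern above per method, the builder's applyToAllUnaryMethods (documented below) can be used; a minimal sketch:

```java
BigQueryWriteSettings.Builder builder = BigQueryWriteSettings.newBuilder();
builder.applyToAllUnaryMethods(
    callSettings -> {
      // Give every unary method the same 30-second total timeout.
      callSettings.setRetrySettings(
          callSettings.getRetrySettings().toBuilder()
              .setTotalTimeout(Duration.ofSeconds(30))
              .build());
      return null;
    });
BigQueryWriteSettings allUnarySettings = builder.build();
```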
+ */ +@Generated("by gapic-generator-java") +public class BigQueryWriteSettings extends ClientSettings { + + /** Returns the object with the settings used for calls to createWriteStream. */ + public UnaryCallSettings createWriteStreamSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).createWriteStreamSettings(); + } + + /** Returns the object with the settings used for calls to appendRows. */ + public StreamingCallSettings appendRowsSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).appendRowsSettings(); + } + + /** Returns the object with the settings used for calls to getWriteStream. */ + public UnaryCallSettings getWriteStreamSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).getWriteStreamSettings(); + } + + /** Returns the object with the settings used for calls to finalizeWriteStream. */ + public UnaryCallSettings + finalizeWriteStreamSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).finalizeWriteStreamSettings(); + } + + /** Returns the object with the settings used for calls to batchCommitWriteStreams. */ + public UnaryCallSettings + batchCommitWriteStreamsSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).batchCommitWriteStreamsSettings(); + } + + /** Returns the object with the settings used for calls to flushRows. */ + public UnaryCallSettings flushRowsSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).flushRowsSettings(); + } + + public static final BigQueryWriteSettings create(BigQueryWriteStubSettings stub) + throws IOException { + return new BigQueryWriteSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return BigQueryWriteStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return BigQueryWriteStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return BigQueryWriteStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return BigQueryWriteStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return BigQueryWriteStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return BigQueryWriteStubSettings.defaultTransportChannelProvider(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return BigQueryWriteStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. 
*/ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryWriteSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for BigQueryWriteSettings. */ + public static class Builder extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(BigQueryWriteStubSettings.newBuilder(clientContext)); + } + + protected Builder(BigQueryWriteSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(BigQueryWriteStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(BigQueryWriteStubSettings.newBuilder()); + } + + public BigQueryWriteStubSettings.Builder getStubSettingsBuilder() { + return ((BigQueryWriteStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to createWriteStream. */ + public UnaryCallSettings.Builder + createWriteStreamSettings() { + return getStubSettingsBuilder().createWriteStreamSettings(); + } + + /** Returns the builder for the settings used for calls to appendRows. */ + public StreamingCallSettings.Builder + appendRowsSettings() { + return getStubSettingsBuilder().appendRowsSettings(); + } + + /** Returns the builder for the settings used for calls to getWriteStream. */ + public UnaryCallSettings.Builder getWriteStreamSettings() { + return getStubSettingsBuilder().getWriteStreamSettings(); + } + + /** Returns the builder for the settings used for calls to finalizeWriteStream. */ + public UnaryCallSettings.Builder + finalizeWriteStreamSettings() { + return getStubSettingsBuilder().finalizeWriteStreamSettings(); + } + + /** Returns the builder for the settings used for calls to batchCommitWriteStreams. */ + public UnaryCallSettings.Builder< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsSettings() { + return getStubSettingsBuilder().batchCommitWriteStreamsSettings(); + } + + /** Returns the builder for the settings used for calls to flushRows. */ + public UnaryCallSettings.Builder flushRowsSettings() { + return getStubSettingsBuilder().flushRowsSettings(); + } + + @Override + public BigQueryWriteSettings build() throws IOException { + return new BigQueryWriteSettings(this); + } + } +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/gapic_metadata.json b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/gapic_metadata.json index a780ca20ec..50f5dba082 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/gapic_metadata.json +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/gapic_metadata.json @@ -22,6 +22,33 @@ } } } + }, + "BigQueryWrite": { + "clients": { + "grpc": { + "libraryClient": "BigQueryWriteClient", + "rpcs": { + "AppendRows": { + "methods": ["appendRowsCallable"] + }, + "BatchCommitWriteStreams": { + "methods": ["batchCommitWriteStreams", "batchCommitWriteStreams", "batchCommitWriteStreamsCallable"] + }, + "CreateWriteStream": { + "methods": ["createWriteStream", "createWriteStream", "createWriteStream", "createWriteStreamCallable"] + }, + "FinalizeWriteStream": { + "methods": ["finalizeWriteStream", "finalizeWriteStream", "finalizeWriteStream", "finalizeWriteStreamCallable"] + }, + "FlushRows": { + "methods": ["flushRows", "flushRows", "flushRows", "flushRowsCallable"] + }, + "GetWriteStream": { + "methods": ["getWriteStream", "getWriteStream", "getWriteStream", "getWriteStreamCallable"] + } + } + } + } } } } \ No newline at end of file diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/package-info.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/package-info.java index 4c8debd6e8..cf95930441 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/package-info.java +++ 
b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/package-info.java @@ -34,6 +34,25 @@ * baseBigQueryReadClient.createReadSession(parent, readSession, maxStreamCount); * } * } + * + *

======================= BigQueryWriteClient ======================= + * + *

Service Description: BigQuery Write API. + * + *

The Write API can be used to write data to BigQuery. + * + *

For supplementary information about the Write API, see: + * https://cloud.google.com/bigquery/docs/write-api + * + *

Sample for BigQueryWriteClient: + * + *

<pre>{@code
+ * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+ *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
+ *   WriteStream writeStream = WriteStream.newBuilder().build();
+ *   WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
+ * }
+ * }</pre>
*/ @Generated("by gapic-generator-java") package com.google.cloud.bigquery.storage.v1; diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStubSettings.java index 751a07b377..8ca93bf54e 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStubSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStubSettings.java @@ -215,11 +215,6 @@ public static class Builder extends StubSettings.BuildernewArrayList(StatusCode.Code.UNAVAILABLE))); - definitions.put( - "retry_policy_2_codes", - ImmutableSet.copyOf( - Lists.newArrayList( - StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); RETRYABLE_CODE_DEFINITIONS = definitions.build(); } @@ -250,17 +245,6 @@ public static class Builder extends StubSettings.BuilderThis class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public abstract class BigQueryWriteStub implements BackgroundResource { + + public UnaryCallable createWriteStreamCallable() { + throw new UnsupportedOperationException("Not implemented: createWriteStreamCallable()"); + } + + public BidiStreamingCallable appendRowsCallable() { + throw new UnsupportedOperationException("Not implemented: appendRowsCallable()"); + } + + public UnaryCallable getWriteStreamCallable() { + throw new UnsupportedOperationException("Not implemented: getWriteStreamCallable()"); + } + + public UnaryCallable + finalizeWriteStreamCallable() { + throw new UnsupportedOperationException("Not implemented: finalizeWriteStreamCallable()"); + } + + public UnaryCallable + batchCommitWriteStreamsCallable() { + throw new UnsupportedOperationException("Not implemented: batchCommitWriteStreamsCallable()"); + } + + public UnaryCallable flushRowsCallable() { + throw new UnsupportedOperationException("Not implemented: flushRowsCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java new file mode 100644 index 0000000000..4c8e08deac --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java @@ -0,0 +1,425 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1.FlushRowsRequest; +import com.google.cloud.bigquery.storage.v1.FlushRowsResponse; +import com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.WriteStream; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; +import org.threeten.bp.Duration; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link BigQueryWriteStub}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
<ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the total timeout of createWriteStream to 30 seconds: + * + *

<pre>{@code
+ * BigQueryWriteStubSettings.Builder bigQueryWriteSettingsBuilder =
+ *     BigQueryWriteStubSettings.newBuilder();
+ * bigQueryWriteSettingsBuilder
+ *     .createWriteStreamSettings()
+ *     .setRetrySettings(
+ *         bigQueryWriteSettingsBuilder
+ *             .createWriteStreamSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setTotalTimeout(Duration.ofSeconds(30))
+ *             .build());
+ * BigQueryWriteStubSettings bigQueryWriteSettings = bigQueryWriteSettingsBuilder.build();
+ * }</pre>
+ */ +@Generated("by gapic-generator-java") +public class BigQueryWriteStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/bigquery") + .add("https://www.googleapis.com/auth/bigquery.insertdata") + .add("https://www.googleapis.com/auth/cloud-platform") + .build(); + + private final UnaryCallSettings createWriteStreamSettings; + private final StreamingCallSettings appendRowsSettings; + private final UnaryCallSettings getWriteStreamSettings; + private final UnaryCallSettings + finalizeWriteStreamSettings; + private final UnaryCallSettings + batchCommitWriteStreamsSettings; + private final UnaryCallSettings flushRowsSettings; + + /** Returns the object with the settings used for calls to createWriteStream. */ + public UnaryCallSettings createWriteStreamSettings() { + return createWriteStreamSettings; + } + + /** Returns the object with the settings used for calls to appendRows. */ + public StreamingCallSettings appendRowsSettings() { + return appendRowsSettings; + } + + /** Returns the object with the settings used for calls to getWriteStream. */ + public UnaryCallSettings getWriteStreamSettings() { + return getWriteStreamSettings; + } + + /** Returns the object with the settings used for calls to finalizeWriteStream. */ + public UnaryCallSettings + finalizeWriteStreamSettings() { + return finalizeWriteStreamSettings; + } + + /** Returns the object with the settings used for calls to batchCommitWriteStreams. */ + public UnaryCallSettings + batchCommitWriteStreamsSettings() { + return batchCommitWriteStreamsSettings; + } + + /** Returns the object with the settings used for calls to flushRows. */ + public UnaryCallSettings flushRowsSettings() { + return flushRowsSettings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public BigQueryWriteStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcBigQueryWriteStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return "bigquerystorage.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "bigquerystorage.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default ChannelProvider for this service. 
*/ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(BigQueryWriteStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryWriteStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createWriteStreamSettings = settingsBuilder.createWriteStreamSettings().build(); + appendRowsSettings = settingsBuilder.appendRowsSettings().build(); + getWriteStreamSettings = settingsBuilder.getWriteStreamSettings().build(); + finalizeWriteStreamSettings = settingsBuilder.finalizeWriteStreamSettings().build(); + batchCommitWriteStreamsSettings = settingsBuilder.batchCommitWriteStreamsSettings().build(); + flushRowsSettings = settingsBuilder.flushRowsSettings().build(); + } + + /** Builder for BigQueryWriteStubSettings. 
*/ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final UnaryCallSettings.Builder + createWriteStreamSettings; + private final StreamingCallSettings.Builder + appendRowsSettings; + private final UnaryCallSettings.Builder + getWriteStreamSettings; + private final UnaryCallSettings.Builder + finalizeWriteStreamSettings; + private final UnaryCallSettings.Builder< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsSettings; + private final UnaryCallSettings.Builder flushRowsSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_2_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + definitions.put( + "retry_policy_3_codes", + ImmutableSet.copyOf(Lists.newArrayList(StatusCode.Code.UNAVAILABLE))); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelay(Duration.ofMillis(60000L)) + .setInitialRpcTimeout(Duration.ofMillis(600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofMillis(600000L)) + .setTotalTimeout(Duration.ofMillis(600000L)) + .build(); + definitions.put("retry_policy_2_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelay(Duration.ofMillis(60000L)) + .setInitialRpcTimeout(Duration.ofMillis(86400000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofMillis(86400000L)) + .setTotalTimeout(Duration.ofMillis(86400000L)) + .build(); + definitions.put("retry_policy_3_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + appendRowsSettings = StreamingCallSettings.newBuilder(); + getWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + finalizeWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + batchCommitWriteStreamsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + flushRowsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createWriteStreamSettings, + getWriteStreamSettings, + finalizeWriteStreamSettings, + batchCommitWriteStreamsSettings, + flushRowsSettings); + initDefaults(this); + } + + protected Builder(BigQueryWriteStubSettings settings) { + super(settings); + + createWriteStreamSettings = settings.createWriteStreamSettings.toBuilder(); + appendRowsSettings = settings.appendRowsSettings.toBuilder(); + getWriteStreamSettings = settings.getWriteStreamSettings.toBuilder(); + finalizeWriteStreamSettings = settings.finalizeWriteStreamSettings.toBuilder(); + batchCommitWriteStreamsSettings = settings.batchCommitWriteStreamsSettings.toBuilder(); + flushRowsSettings = settings.flushRowsSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + 
createWriteStreamSettings, + getWriteStreamSettings, + finalizeWriteStreamSettings, + batchCommitWriteStreamsSettings, + flushRowsSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setEndpoint(getDefaultEndpoint()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .createWriteStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); + + builder + .getWriteStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); + + builder + .finalizeWriteStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); + + builder + .batchCommitWriteStreamsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); + + builder + .flushRowsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createWriteStream. */ + public UnaryCallSettings.Builder + createWriteStreamSettings() { + return createWriteStreamSettings; + } + + /** Returns the builder for the settings used for calls to appendRows. */ + public StreamingCallSettings.Builder + appendRowsSettings() { + return appendRowsSettings; + } + + /** Returns the builder for the settings used for calls to getWriteStream. */ + public UnaryCallSettings.Builder getWriteStreamSettings() { + return getWriteStreamSettings; + } + + /** Returns the builder for the settings used for calls to finalizeWriteStream. */ + public UnaryCallSettings.Builder + finalizeWriteStreamSettings() { + return finalizeWriteStreamSettings; + } + + /** Returns the builder for the settings used for calls to batchCommitWriteStreams. */ + public UnaryCallSettings.Builder< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsSettings() { + return batchCommitWriteStreamsSettings; + } + + /** Returns the builder for the settings used for calls to flushRows. */ + public UnaryCallSettings.Builder flushRowsSettings() { + return flushRowsSettings; + } + + @Override + public BigQueryWriteStubSettings build() throws IOException { + return new BigQueryWriteStubSettings(this); + } + } +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteCallableFactory.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteCallableFactory.java new file mode 100644 index 0000000000..1b571da17e --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteCallableFactory.java @@ -0,0 +1,113 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1.stub; + +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the BigQueryWrite service API. + * + *
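+ * <p>A minimal sketch of supplying a custom factory through the two-argument create
+ * overload on GrpcBigQueryWriteStub (CustomWriteCallableFactory is a hypothetical
+ * subclass of this class):
+ *
+ * <pre>{@code
+ * BigQueryWriteStubSettings settings = BigQueryWriteStubSettings.newBuilder().build();
+ * ClientContext context = ClientContext.create(settings);
+ * BigQueryWriteStub stub =
+ *     GrpcBigQueryWriteStub.create(context, new CustomWriteCallableFactory());
+ * }</pre>
+ *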

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class GrpcBigQueryWriteCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteStub.java new file mode 100644 index 0000000000..a5e81a3dfd --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteStub.java @@ -0,0 +1,332 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1.stub; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1.FlushRowsRequest; +import com.google.cloud.bigquery.storage.v1.FlushRowsResponse; +import com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.WriteStream; +import com.google.common.collect.ImmutableMap; +import com.google.longrunning.stub.GrpcOperationsStub; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the BigQueryWrite service API. + * + *
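+ * <p>A minimal sketch of standalone stub usage; most callers should go through
+ * BigQueryWriteClient, which wraps this stub:
+ *
+ * <pre>{@code
+ * BigQueryWriteStubSettings settings = BigQueryWriteStubSettings.newBuilder().build();
+ * try (BigQueryWriteStub stub = GrpcBigQueryWriteStub.create(settings)) {
+ *   WriteStream stream =
+ *       stub.createWriteStreamCallable()
+ *           .call(
+ *               CreateWriteStreamRequest.newBuilder()
+ *                   .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+ *                   .build());
+ * }
+ * }</pre>
+ *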

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class GrpcBigQueryWriteStub extends BigQueryWriteStub { + private static final MethodDescriptor + createWriteStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.bigquery.storage.v1.BigQueryWrite/CreateWriteStream") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(WriteStream.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + appendRowsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName("google.cloud.bigquery.storage.v1.BigQueryWrite/AppendRows") + .setRequestMarshaller(ProtoUtils.marshaller(AppendRowsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(AppendRowsResponse.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + getWriteStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.bigquery.storage.v1.BigQueryWrite/GetWriteStream") + .setRequestMarshaller( + ProtoUtils.marshaller(GetWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(WriteStream.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + finalizeWriteStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1.BigQueryWrite/FinalizeWriteStream") + .setRequestMarshaller( + ProtoUtils.marshaller(FinalizeWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(FinalizeWriteStreamResponse.getDefaultInstance())) + .build(); + + private static final MethodDescriptor< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1.BigQueryWrite/BatchCommitWriteStreams") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchCommitWriteStreamsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(BatchCommitWriteStreamsResponse.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + flushRowsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.bigquery.storage.v1.BigQueryWrite/FlushRows") + .setRequestMarshaller(ProtoUtils.marshaller(FlushRowsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(FlushRowsResponse.getDefaultInstance())) + .build(); + + private final UnaryCallable createWriteStreamCallable; + private final BidiStreamingCallable appendRowsCallable; + private final UnaryCallable getWriteStreamCallable; + private final UnaryCallable + finalizeWriteStreamCallable; + private final UnaryCallable + batchCommitWriteStreamsCallable; + private final UnaryCallable flushRowsCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcBigQueryWriteStub create(BigQueryWriteStubSettings settings) + throws IOException { + return new 
GrpcBigQueryWriteStub(settings, ClientContext.create(settings)); + } + + public static final GrpcBigQueryWriteStub create(ClientContext clientContext) throws IOException { + return new GrpcBigQueryWriteStub(BigQueryWriteStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcBigQueryWriteStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcBigQueryWriteStub( + BigQueryWriteStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcBigQueryWriteStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBigQueryWriteStub(BigQueryWriteStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcBigQueryWriteCallableFactory()); + } + + /** + * Constructs an instance of GrpcBigQueryWriteStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBigQueryWriteStub( + BigQueryWriteStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings createWriteStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createWriteStreamMethodDescriptor) + .setParamsExtractor( + request -> { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("parent", String.valueOf(request.getParent())); + return params.build(); + }) + .build(); + GrpcCallSettings appendRowsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(appendRowsMethodDescriptor) + .setParamsExtractor( + request -> { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("write_stream", String.valueOf(request.getWriteStream())); + return params.build(); + }) + .build(); + GrpcCallSettings getWriteStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getWriteStreamMethodDescriptor) + .setParamsExtractor( + request -> { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("name", String.valueOf(request.getName())); + return params.build(); + }) + .build(); + GrpcCallSettings + finalizeWriteStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(finalizeWriteStreamMethodDescriptor) + .setParamsExtractor( + request -> { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("name", String.valueOf(request.getName())); + return params.build(); + }) + .build(); + GrpcCallSettings + batchCommitWriteStreamsTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(batchCommitWriteStreamsMethodDescriptor) + .setParamsExtractor( + request -> { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("parent", String.valueOf(request.getParent())); + return params.build(); + }) + .build(); + GrpcCallSettings flushRowsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(flushRowsMethodDescriptor) + .setParamsExtractor( + request -> { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("write_stream", String.valueOf(request.getWriteStream())); + return params.build(); + }) + .build(); + + 
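+    // Each ParamsExtractor above copies a resource field ("parent", "name", or
+    // "write_stream") into the x-goog-request-params routing header sent with the RPC;
+    // the callables below pair those transport settings with the retry and timeout
+    // policies resolved from BigQueryWriteStubSettings.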
this.createWriteStreamCallable = + callableFactory.createUnaryCallable( + createWriteStreamTransportSettings, + settings.createWriteStreamSettings(), + clientContext); + this.appendRowsCallable = + callableFactory.createBidiStreamingCallable( + appendRowsTransportSettings, settings.appendRowsSettings(), clientContext); + this.getWriteStreamCallable = + callableFactory.createUnaryCallable( + getWriteStreamTransportSettings, settings.getWriteStreamSettings(), clientContext); + this.finalizeWriteStreamCallable = + callableFactory.createUnaryCallable( + finalizeWriteStreamTransportSettings, + settings.finalizeWriteStreamSettings(), + clientContext); + this.batchCommitWriteStreamsCallable = + callableFactory.createUnaryCallable( + batchCommitWriteStreamsTransportSettings, + settings.batchCommitWriteStreamsSettings(), + clientContext); + this.flushRowsCallable = + callableFactory.createUnaryCallable( + flushRowsTransportSettings, settings.flushRowsSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable createWriteStreamCallable() { + return createWriteStreamCallable; + } + + @Override + public BidiStreamingCallable appendRowsCallable() { + return appendRowsCallable; + } + + @Override + public UnaryCallable getWriteStreamCallable() { + return getWriteStreamCallable; + } + + @Override + public UnaryCallable + finalizeWriteStreamCallable() { + return finalizeWriteStreamCallable; + } + + @Override + public UnaryCallable + batchCommitWriteStreamsCallable() { + return batchCommitWriteStreamsCallable; + } + + @Override + public UnaryCallable flushRowsCallable() { + return flushRowsCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java index 084df87b2f..ba83560db3 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java @@ -88,7 +88,7 @@ public void createReadSessionTest() throws Exception { .setName("name3373707") .setExpireTime(Timestamp.newBuilder().build()) .setDataFormat(DataFormat.forNumber(0)) - .setTable("table110115790") + .setTable(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) .setTableModifiers(ReadSession.TableModifiers.newBuilder().build()) .setReadOptions(ReadSession.TableReadOptions.newBuilder().build()) .addAllStreams(new ArrayList()) @@ -139,7 +139,7 
@@ public void createReadSessionTest2() throws Exception { .setName("name3373707") .setExpireTime(Timestamp.newBuilder().build()) .setDataFormat(DataFormat.forNumber(0)) - .setTable("table110115790") + .setTable(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) .setTableModifiers(ReadSession.TableModifiers.newBuilder().build()) .setReadOptions(ReadSession.TableReadOptions.newBuilder().build()) .addAllStreams(new ArrayList()) diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClientTest.java new file mode 100644 index 0000000000..92221e7415 --- /dev/null +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClientTest.java @@ -0,0 +1,500 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiStreamObserver; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Int64Value; +import com.google.protobuf.Timestamp; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class BigQueryWriteClientTest { + private static MockBigQueryWrite mockBigQueryWrite; + private static MockServiceHelper mockServiceHelper; + private LocalChannelProvider channelProvider; + private BigQueryWriteClient client; + + @BeforeClass + public static void startStaticServer() { + mockBigQueryWrite = new MockBigQueryWrite(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockBigQueryWrite)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); + BigQueryWriteSettings settings = + BigQueryWriteSettings.newBuilder() + .setTransportChannelProvider(channelProvider) 
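+            // LocalChannelProvider points the client at the in-process mock service;
+            // NoCredentialsProvider (next call) skips real credentials so the tests
+            // stay hermetic.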
+ .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = BigQueryWriteClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void createWriteStreamTest() throws Exception { + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); + WriteStream writeStream = WriteStream.newBuilder().build(); + + WriteStream actualResponse = client.createWriteStream(parent, writeStream); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateWriteStreamRequest actualRequest = ((CreateWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(writeStream, actualRequest.getWriteStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createWriteStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); + WriteStream writeStream = WriteStream.newBuilder().build(); + client.createWriteStream(parent, writeStream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
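+      // A stricter variant could also pin the mapped status code, e.g.:
+      //   Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, e.getStatusCode().getCode());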
+ } + } + + @Test + public void createWriteStreamTest2() throws Exception { + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String parent = "parent-995424086"; + WriteStream writeStream = WriteStream.newBuilder().build(); + + WriteStream actualResponse = client.createWriteStream(parent, writeStream); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateWriteStreamRequest actualRequest = ((CreateWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(writeStream, actualRequest.getWriteStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createWriteStreamExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String parent = "parent-995424086"; + WriteStream writeStream = WriteStream.newBuilder().build(); + client.createWriteStream(parent, writeStream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void appendRowsTest() throws Exception { + AppendRowsResponse expectedResponse = + AppendRowsResponse.newBuilder().setUpdatedSchema(TableSchema.newBuilder().build()).build(); + mockBigQueryWrite.addResponse(expectedResponse); + AppendRowsRequest request = + AppendRowsRequest.newBuilder() + .setWriteStream( + WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setOffset(Int64Value.newBuilder().build()) + .setTraceId("traceId-1067401920") + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.appendRowsCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + requestObserver.onCompleted(); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void appendRowsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + AppendRowsRequest request = + AppendRowsRequest.newBuilder() + .setWriteStream( + WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setOffset(Int64Value.newBuilder().build()) + .setTraceId("traceId-1067401920") + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.appendRowsCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() 
instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void getWriteStreamTest() throws Exception { + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + + WriteStream actualResponse = client.getWriteStream(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetWriteStreamRequest actualRequest = ((GetWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getWriteStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + client.getWriteStream(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getWriteStreamTest2() throws Exception { + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String name = "name3373707"; + + WriteStream actualResponse = client.getWriteStream(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetWriteStreamRequest actualRequest = ((GetWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getWriteStreamExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String name = "name3373707"; + client.getWriteStream(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void finalizeWriteStreamTest() throws Exception { + FinalizeWriteStreamResponse expectedResponse = + FinalizeWriteStreamResponse.newBuilder().setRowCount(1340416618).build(); + mockBigQueryWrite.addResponse(expectedResponse); + + WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + + FinalizeWriteStreamResponse actualResponse = client.finalizeWriteStream(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + FinalizeWriteStreamRequest actualRequest = ((FinalizeWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void finalizeWriteStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + client.finalizeWriteStream(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void finalizeWriteStreamTest2() throws Exception { + FinalizeWriteStreamResponse expectedResponse = + FinalizeWriteStreamResponse.newBuilder().setRowCount(1340416618).build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String name = "name3373707"; + + FinalizeWriteStreamResponse actualResponse = client.finalizeWriteStream(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + FinalizeWriteStreamRequest actualRequest = ((FinalizeWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void finalizeWriteStreamExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String name = "name3373707"; + client.finalizeWriteStream(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchCommitWriteStreamsTest() throws Exception { + BatchCommitWriteStreamsResponse expectedResponse = + BatchCommitWriteStreamsResponse.newBuilder() + .setCommitTime(Timestamp.newBuilder().build()) + .addAllStreamErrors(new ArrayList()) + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + BatchCommitWriteStreamsResponse actualResponse = client.batchCommitWriteStreams(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchCommitWriteStreamsRequest actualRequest = + ((BatchCommitWriteStreamsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchCommitWriteStreamsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String parent = "parent-995424086"; + client.batchCommitWriteStreams(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void flushRowsTest() throws Exception { + FlushRowsResponse expectedResponse = + FlushRowsResponse.newBuilder().setOffset(-1019779949).build(); + mockBigQueryWrite.addResponse(expectedResponse); + + WriteStreamName writeStream = + WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + + FlushRowsResponse actualResponse = client.flushRows(writeStream); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + FlushRowsRequest actualRequest = ((FlushRowsRequest) actualRequests.get(0)); + + Assert.assertEquals(writeStream.toString(), actualRequest.getWriteStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void flushRowsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + WriteStreamName writeStream = + WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + client.flushRows(writeStream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
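+      // Against the real service, FlushRows is only meaningful for streams created in
+      // BUFFERED mode; the mock here simply replays the queued exception.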
+ } + } + + @Test + public void flushRowsTest2() throws Exception { + FlushRowsResponse expectedResponse = + FlushRowsResponse.newBuilder().setOffset(-1019779949).build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String writeStream = "writeStream1412231231"; + + FlushRowsResponse actualResponse = client.flushRows(writeStream); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + FlushRowsRequest actualRequest = ((FlushRowsRequest) actualRequests.get(0)); + + Assert.assertEquals(writeStream, actualRequest.getWriteStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void flushRowsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String writeStream = "writeStream1412231231"; + client.flushRows(writeStream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } +} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWrite.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWrite.java new file mode 100644 index 0000000000..e1e255bcc5 --- /dev/null +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWrite.java @@ -0,0 +1,59 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockBigQueryWrite implements MockGrpcService { + private final MockBigQueryWriteImpl serviceImpl; + + public MockBigQueryWrite() { + serviceImpl = new MockBigQueryWriteImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java new file mode 100644 index 0000000000..33d2a5ef3d --- /dev/null +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java @@ -0,0 +1,204 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.BetaApi; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteGrpc.BigQueryWriteImplBase; +import com.google.protobuf.AbstractMessage; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockBigQueryWriteImpl extends BigQueryWriteImplBase { + private List requests; + private Queue responses; + + public MockBigQueryWriteImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void createWriteStream( + CreateWriteStreamRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof WriteStream) { + requests.add(request); + responseObserver.onNext(((WriteStream) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateWriteStream, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + WriteStream.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public StreamObserver appendRows( + final StreamObserver responseObserver) { + StreamObserver requestObserver = + new StreamObserver() { + @Override + public void onNext(AppendRowsRequest value) { + requests.add(value); + final Object response = responses.remove(); + if (response instanceof AppendRowsResponse) { + responseObserver.onNext(((AppendRowsResponse) response)); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method AppendRows, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + AppendRowsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void onError(Throwable t) { + responseObserver.onError(t); + } + + @Override + public void onCompleted() { + responseObserver.onCompleted(); + } + }; + return requestObserver; + } + + @Override + public void getWriteStream( + GetWriteStreamRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof WriteStream) { + requests.add(request); + responseObserver.onNext(((WriteStream) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetWriteStream, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + WriteStream.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void finalizeWriteStream( + FinalizeWriteStreamRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof FinalizeWriteStreamResponse) { + requests.add(request); + responseObserver.onNext(((FinalizeWriteStreamResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method FinalizeWriteStream, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + FinalizeWriteStreamResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void batchCommitWriteStreams( + BatchCommitWriteStreamsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BatchCommitWriteStreamsResponse) { + requests.add(request); + responseObserver.onNext(((BatchCommitWriteStreamsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + BatchCommitWriteStreamsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void flushRows( + FlushRowsRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof FlushRowsResponse) { + requests.add(request); + responseObserver.onNext(((FlushRowsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method FlushRows, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + FlushRowsResponse.class.getName(), + Exception.class.getName()))); + } + } +} diff --git a/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java b/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java new file mode 100644 index 0000000000..f7d316dede --- /dev/null +++ b/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java @@ -0,0 +1,1068 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
+ * <pre>
+ * BigQuery Write API.
+ * The Write API can be used to write data to BigQuery.
+ * For supplementary information about the Write API, see:
+ * https://cloud.google.com/bigquery/docs/write-api
+ * </pre>
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler", + comments = "Source: google/cloud/bigquery/storage/v1/storage.proto") +public final class BigQueryWriteGrpc { + + private BigQueryWriteGrpc() {} + + public static final String SERVICE_NAME = "google.cloud.bigquery.storage.v1.BigQueryWrite"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream> + getCreateWriteStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateWriteStream", + requestType = com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.WriteStream.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream> + getCreateWriteStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream> + getCreateWriteStreamMethod; + if ((getCreateWriteStreamMethod = BigQueryWriteGrpc.getCreateWriteStreamMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getCreateWriteStreamMethod = BigQueryWriteGrpc.getCreateWriteStreamMethod) == null) { + BigQueryWriteGrpc.getCreateWriteStreamMethod = + getCreateWriteStreamMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateWriteStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.WriteStream + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("CreateWriteStream")) + .build(); + } + } + } + return getCreateWriteStreamMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse> + getAppendRowsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "AppendRows", + requestType = com.google.cloud.bigquery.storage.v1.AppendRowsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.AppendRowsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse> + getAppendRowsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse> + getAppendRowsMethod; + if ((getAppendRowsMethod = BigQueryWriteGrpc.getAppendRowsMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getAppendRowsMethod = BigQueryWriteGrpc.getAppendRowsMethod) == null) { + BigQueryWriteGrpc.getAppendRowsMethod = + getAppendRowsMethod = + io.grpc.MethodDescriptor + . 
+ <com.google.cloud.bigquery.storage.v1.AppendRowsRequest, com.google.cloud.bigquery.storage.v1.AppendRowsResponse>
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "AppendRows")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse + .getDefaultInstance())) + .setSchemaDescriptor(new BigQueryWriteMethodDescriptorSupplier("AppendRows")) + .build(); + } + } + } + return getAppendRowsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream> + getGetWriteStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetWriteStream", + requestType = com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.WriteStream.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream> + getGetWriteStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream> + getGetWriteStreamMethod; + if ((getGetWriteStreamMethod = BigQueryWriteGrpc.getGetWriteStreamMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getGetWriteStreamMethod = BigQueryWriteGrpc.getGetWriteStreamMethod) == null) { + BigQueryWriteGrpc.getGetWriteStreamMethod = + getGetWriteStreamMethod = + io.grpc.MethodDescriptor + . 
+ <com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest, com.google.cloud.bigquery.storage.v1.WriteStream>
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetWriteStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.WriteStream + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("GetWriteStream")) + .build(); + } + } + } + return getGetWriteStreamMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse> + getFinalizeWriteStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "FinalizeWriteStream", + requestType = com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse> + getFinalizeWriteStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse> + getFinalizeWriteStreamMethod; + if ((getFinalizeWriteStreamMethod = BigQueryWriteGrpc.getFinalizeWriteStreamMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getFinalizeWriteStreamMethod = BigQueryWriteGrpc.getFinalizeWriteStreamMethod) + == null) { + BigQueryWriteGrpc.getFinalizeWriteStreamMethod = + getFinalizeWriteStreamMethod = + io.grpc.MethodDescriptor + . 
+ <com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest, com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse>
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "FinalizeWriteStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("FinalizeWriteStream")) + .build(); + } + } + } + return getFinalizeWriteStreamMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse> + getBatchCommitWriteStreamsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BatchCommitWriteStreams", + requestType = com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse> + getBatchCommitWriteStreamsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse> + getBatchCommitWriteStreamsMethod; + if ((getBatchCommitWriteStreamsMethod = BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod) + == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getBatchCommitWriteStreamsMethod = BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod) + == null) { + BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod = + getBatchCommitWriteStreamsMethod = + io.grpc.MethodDescriptor + . 
+ <com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest, com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse>
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "BatchCommitWriteStreams")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("BatchCommitWriteStreams")) + .build(); + } + } + } + return getBatchCommitWriteStreamsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.FlushRowsRequest, + com.google.cloud.bigquery.storage.v1.FlushRowsResponse> + getFlushRowsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "FlushRows", + requestType = com.google.cloud.bigquery.storage.v1.FlushRowsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.FlushRowsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.FlushRowsRequest, + com.google.cloud.bigquery.storage.v1.FlushRowsResponse> + getFlushRowsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.FlushRowsRequest, + com.google.cloud.bigquery.storage.v1.FlushRowsResponse> + getFlushRowsMethod; + if ((getFlushRowsMethod = BigQueryWriteGrpc.getFlushRowsMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getFlushRowsMethod = BigQueryWriteGrpc.getFlushRowsMethod) == null) { + BigQueryWriteGrpc.getFlushRowsMethod = + getFlushRowsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "FlushRows")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.FlushRowsResponse + .getDefaultInstance())) + .setSchemaDescriptor(new BigQueryWriteMethodDescriptorSupplier("FlushRows")) + .build(); + } + } + } + return getFlushRowsMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static BigQueryWriteStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryWriteStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteStub(channel, callOptions); + } + }; + return BigQueryWriteStub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static BigQueryWriteBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryWriteBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteBlockingStub(channel, callOptions); + } + }; + return BigQueryWriteBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static BigQueryWriteFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryWriteFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteFutureStub(channel, callOptions); + } + }; + return BigQueryWriteFutureStub.newStub(factory, channel); + } + + /** + * + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * For supplementary information about the Write API, see:
+   * https://cloud.google.com/bigquery/docs/write-api
+   * 
+ */ + public abstract static class BigQueryWriteImplBase implements io.grpc.BindableService { + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
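+     * <p>For illustration only (not part of the generated surface), a sketch of how a
+     * client might refer to the two kinds of streams; the table path is a placeholder,
+     * and the {@code /_default} suffix is assumed here as the default stream's name:
+     * <pre>{@code
+     * String table = "projects/my-project/datasets/my_dataset/tables/my_table";
+     * // The special default stream is never created explicitly; it is addressed by name.
+     * String defaultStream = table + "/_default";
+     * // Application-created streams are requested via CreateWriteStream, e.g. PENDING:
+     * CreateWriteStreamRequest request =
+     *     CreateWriteStreamRequest.newBuilder()
+     *         .setParent(table)
+     *         .setWriteStream(
+     *             WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build())
+     *         .build();
+     * }</pre>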
+ */ + public void createWriteStream( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateWriteStreamMethod(), responseObserver); + } + + /** + * + * + *
+     * Appends data to the given stream.
+     * If `offset` is specified, the `offset` is checked against the end of the
+     * stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+     * attempt is made to append to an offset beyond the current end of the stream,
+     * or `ALREADY_EXISTS` if the user provides an `offset` that has already been
+     * written to. The user can retry with an adjusted offset within the same RPC
+     * connection. If `offset` is not specified, append happens at the end of the
+     * stream.
+     * The response contains an optional offset at which the append
+     * happened.  No offset information will be returned for appends to a
+     * default stream.
+     * Responses are received in the same order in which requests are sent.
+     * There will be one response for each successfully inserted request.  Responses
+     * may optionally embed error information if the originating AppendRowsRequest was
+     * not successfully processed.
+     * The specifics of when successfully appended data is made visible to the
+     * table are governed by the type of stream:
+     * * For COMMITTED streams (which includes the default stream), data is
+     * visible immediately upon successful append.
+     * * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
+     * rpc which advances a cursor to a newer offset in the stream.
+     * * For PENDING streams, data is not made visible until the stream itself is
+     * finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
+     * committed via the `BatchCommitWriteStreams` rpc.
+     * 
+ */ + public io.grpc.stub.StreamObserver + appendRows( + io.grpc.stub.StreamObserver + responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall( + getAppendRowsMethod(), responseObserver); + } + + /** + * + * + *
+     * Gets information about a write stream.
+     * 
+ */ + public void getWriteStream( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetWriteStreamMethod(), responseObserver); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
+ */ + public void finalizeWriteStream( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getFinalizeWriteStreamMethod(), responseObserver); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + public void batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getBatchCommitWriteStreamsMethod(), responseObserver); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * flush operation advances the stream's flushed offset from any previously
+     * flushed offset to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
+ */ + public void flushRows( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getFlushRowsMethod(), responseObserver); + } + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateWriteStreamMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream>( + this, METHODID_CREATE_WRITE_STREAM))) + .addMethod( + getAppendRowsMethod(), + io.grpc.stub.ServerCalls.asyncBidiStreamingCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse>( + this, METHODID_APPEND_ROWS))) + .addMethod( + getGetWriteStreamMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream>( + this, METHODID_GET_WRITE_STREAM))) + .addMethod( + getFinalizeWriteStreamMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse>( + this, METHODID_FINALIZE_WRITE_STREAM))) + .addMethod( + getBatchCommitWriteStreamsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse>( + this, METHODID_BATCH_COMMIT_WRITE_STREAMS))) + .addMethod( + getFlushRowsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.FlushRowsRequest, + com.google.cloud.bigquery.storage.v1.FlushRowsResponse>( + this, METHODID_FLUSH_ROWS))) + .build(); + } + } + + /** + * + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * For supplementary information about the Write API, see:
+   * https://cloud.google.com/bigquery/docs/write-api
+   * 
+ */ + public static final class BigQueryWriteStub + extends io.grpc.stub.AbstractAsyncStub { + private BigQueryWriteStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
+ */ + public void createWriteStream( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateWriteStreamMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Appends data to the given stream.
+     * If `offset` is specified, the `offset` is checked against the end of the
+     * stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+     * attempt is made to append to an offset beyond the current end of the stream,
+     * or `ALREADY_EXISTS` if the user provides an `offset` that has already been
+     * written to. The user can retry with an adjusted offset within the same RPC
+     * connection. If `offset` is not specified, append happens at the end of the
+     * stream.
+     * The response contains an optional offset at which the append
+     * happened.  No offset information will be returned for appends to a
+     * default stream.
+     * Responses are received in the same order in which requests are sent.
+     * There will be one response for each successfully inserted request.  Responses
+     * may optionally embed error information if the originating AppendRowsRequest was
+     * not successfully processed.
+     * The specifics of when successfully appended data is made visible to the
+     * table are governed by the type of stream:
+     * * For COMMITTED streams (which includes the default stream), data is
+     * visible immediately upon successful append.
+     * * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
+     * rpc which advances a cursor to a newer offset in the stream.
+     * * For PENDING streams, data is not made visible until the stream itself is
+     * finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
+     * committed via the `BatchCommitWriteStreams` rpc.
+     * 
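+     * <p>A minimal client-side sketch, for illustration only; it assumes {@code stub} was
+     * obtained from {@code BigQueryWriteGrpc.newStub(channel)}, and that
+     * {@code firstRequest} and {@code nextRequest} are prebuilt {@code AppendRowsRequest}s
+     * (only the first needs the stream name and writer schema):
+     * <pre>{@code
+     * StreamObserver<AppendRowsResponse> responses =
+     *     new StreamObserver<AppendRowsResponse>() {
+     *       public void onNext(AppendRowsResponse response) {
+     *         // One response per successfully processed request, in request order.
+     *       }
+     *
+     *       public void onError(Throwable t) {
+     *         // Terminal stream error; the connection must be reopened to retry.
+     *       }
+     *
+     *       public void onCompleted() {}
+     *     };
+     * StreamObserver<AppendRowsRequest> requests = stub.appendRows(responses);
+     * requests.onNext(firstRequest);
+     * requests.onNext(nextRequest);
+     * requests.onCompleted();
+     * }</pre>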
+ */ + public io.grpc.stub.StreamObserver + appendRows( + io.grpc.stub.StreamObserver + responseObserver) { + return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( + getChannel().newCall(getAppendRowsMethod(), getCallOptions()), responseObserver); + } + + /** + * + * + *
+     * Gets information about a write stream.
+     * 
+ */ + public void getWriteStream( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetWriteStreamMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
+ */ + public void finalizeWriteStream( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getFinalizeWriteStreamMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + public void batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getBatchCommitWriteStreamsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * flush operation advances the stream's flushed offset from any previously
+     * flushed offset to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
+ */ + public void flushRows( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getFlushRowsMethod(), getCallOptions()), request, responseObserver); + } + } + + /** + * + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * For supplementary information about the Write API, see:
+   * https://cloud.google.com/bigquery/docs/write-api
+   * 
+ */ + public static final class BigQueryWriteBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private BigQueryWriteBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
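+     * <p>A minimal sketch, for illustration only; it assumes an authenticated
+     * {@code channel} and uses a placeholder table path:
+     * <pre>{@code
+     * BigQueryWriteBlockingStub stub = BigQueryWriteGrpc.newBlockingStub(channel);
+     * WriteStream pendingStream =
+     *     stub.createWriteStream(
+     *         CreateWriteStreamRequest.newBuilder()
+     *             .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
+     *             .setWriteStream(
+     *                 WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build())
+     *             .build());
+     * }</pre>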
+ */ + public com.google.cloud.bigquery.storage.v1.WriteStream createWriteStream( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets information about a write stream.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1.WriteStream getWriteStream( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
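+     * <p>A minimal sketch, for illustration only; {@code stub} and
+     * {@code pendingStreamName} are assumed to exist:
+     * <pre>{@code
+     * FinalizeWriteStreamResponse finalized =
+     *     stub.finalizeWriteStream(
+     *         FinalizeWriteStreamRequest.newBuilder().setName(pendingStreamName).build());
+     * long acceptedRows = finalized.getRowCount();
+     * }</pre>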
+ */ + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse finalizeWriteStream( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getFinalizeWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
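+     * <p>A minimal sketch, for illustration only; {@code stub}, {@code table}, and a
+     * previously finalized {@code pendingStreamName} are assumed to exist:
+     * <pre>{@code
+     * BatchCommitWriteStreamsResponse committed =
+     *     stub.batchCommitWriteStreams(
+     *         BatchCommitWriteStreamsRequest.newBuilder()
+     *             .setParent(table)
+     *             .addWriteStreams(pendingStreamName)
+     *             .build());
+     * // A populated commit_time with no stream errors indicates the data is readable.
+     * boolean ok = committed.hasCommitTime() && committed.getStreamErrorsCount() == 0;
+     * }</pre>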
+ */ + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getBatchCommitWriteStreamsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * flush operation advances the stream's flushed offset from any previously
+     * flushed offset to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
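+     * <p>A minimal sketch, for illustration only; {@code stub} and
+     * {@code bufferedStreamName} are assumed, and rows up to offset 42 are made readable:
+     * <pre>{@code
+     * FlushRowsResponse flushed =
+     *     stub.flushRows(
+     *         FlushRowsRequest.newBuilder()
+     *             .setWriteStream(bufferedStreamName)
+     *             .setOffset(com.google.protobuf.Int64Value.of(42))
+     *             .build());
+     * long flushedUpTo = flushed.getOffset();
+     * }</pre>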
+ */ + public com.google.cloud.bigquery.storage.v1.FlushRowsResponse flushRows( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getFlushRowsMethod(), getCallOptions(), request); + } + } + + /** + * + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * For supplementary information about the Write API, see:
+   * https://cloud.google.com/bigquery/docs/write-api
+   * 
+ */ + public static final class BigQueryWriteFutureStub + extends io.grpc.stub.AbstractFutureStub { + private BigQueryWriteFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1.WriteStream> + createWriteStream(com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateWriteStreamMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Gets information about a write stream.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1.WriteStream> + getWriteStream(com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetWriteStreamMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse> + finalizeWriteStream( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getFinalizeWriteStreamMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse> + batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getBatchCommitWriteStreamsMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * flush operation advances the stream's flushed offset from any previously
+     * flushed offset to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1.FlushRowsResponse> + flushRows(com.google.cloud.bigquery.storage.v1.FlushRowsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getFlushRowsMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_CREATE_WRITE_STREAM = 0; + private static final int METHODID_GET_WRITE_STREAM = 1; + private static final int METHODID_FINALIZE_WRITE_STREAM = 2; + private static final int METHODID_BATCH_COMMIT_WRITE_STREAMS = 3; + private static final int METHODID_FLUSH_ROWS = 4; + private static final int METHODID_APPEND_ROWS = 5; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final BigQueryWriteImplBase serviceImpl; + private final int methodId; + + MethodHandlers(BigQueryWriteImplBase serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_CREATE_WRITE_STREAM: + serviceImpl.createWriteStream( + (com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_WRITE_STREAM: + serviceImpl.getWriteStream( + (com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_FINALIZE_WRITE_STREAM: + serviceImpl.finalizeWriteStream( + (com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse>) + responseObserver); + break; + case METHODID_BATCH_COMMIT_WRITE_STREAMS: + serviceImpl.batchCommitWriteStreams( + (com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse>) + responseObserver); + break; + case METHODID_FLUSH_ROWS: + serviceImpl.flushRows( + (com.google.cloud.bigquery.storage.v1.FlushRowsRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_APPEND_ROWS: + return (io.grpc.stub.StreamObserver) + serviceImpl.appendRows( + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.AppendRowsResponse>) + responseObserver); + default: + throw new AssertionError(); + } + } + } + + private abstract static class BigQueryWriteBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + BigQueryWriteBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return 
getFileDescriptor().findServiceByName("BigQueryWrite"); + } + } + + private static final class BigQueryWriteFileDescriptorSupplier + extends BigQueryWriteBaseDescriptorSupplier { + BigQueryWriteFileDescriptorSupplier() {} + } + + private static final class BigQueryWriteMethodDescriptorSupplier + extends BigQueryWriteBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final String methodName; + + BigQueryWriteMethodDescriptorSupplier(String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (BigQueryWriteGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new BigQueryWriteFileDescriptorSupplier()) + .addMethod(getCreateWriteStreamMethod()) + .addMethod(getAppendRowsMethod()) + .addMethod(getGetWriteStreamMethod()) + .addMethod(getFinalizeWriteStreamMethod()) + .addMethod(getBatchCommitWriteStreamsMethod()) + .addMethod(getFlushRowsMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java new file mode 100644 index 0000000000..ae605a3b8e --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java @@ -0,0 +1,2777 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for `AppendRows`.
+ * Because AppendRows is a bidirectional streaming RPC, certain
+ * parts of the AppendRowsRequest need only be specified for the first request
+ * sent each time the gRPC network connection is opened/reopened.
+ * 
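+ * <p>For illustration only, a sketch contrasting the first request on a connection (which
+ * carries the stream name and, for proto data, the writer schema) with a subsequent request
+ * that carries only rows; {@code streamName}, {@code schema}, {@code firstBatch}, and
+ * {@code nextBatch} are assumed to exist:
+ * <pre>{@code
+ * AppendRowsRequest first =
+ *     AppendRowsRequest.newBuilder()
+ *         .setWriteStream(streamName)
+ *         .setProtoRows(
+ *             AppendRowsRequest.ProtoData.newBuilder()
+ *                 .setWriterSchema(schema)
+ *                 .setRows(firstBatch)
+ *                 .build())
+ *         .build();
+ * AppendRowsRequest subsequent =
+ *     AppendRowsRequest.newBuilder()
+ *         .setProtoRows(AppendRowsRequest.ProtoData.newBuilder().setRows(nextBatch).build())
+ *         .build();
+ * }</pre>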
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest} + */ +public final class AppendRowsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.AppendRowsRequest) + AppendRowsRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use AppendRowsRequest.newBuilder() to construct. + private AppendRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AppendRowsRequest() { + writeStream_ = ""; + traceId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AppendRowsRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private AppendRowsRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + writeStream_ = s; + break; + } + case 18: + { + com.google.protobuf.Int64Value.Builder subBuilder = null; + if (offset_ != null) { + subBuilder = offset_.toBuilder(); + } + offset_ = + input.readMessage(com.google.protobuf.Int64Value.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(offset_); + offset_ = subBuilder.buildPartial(); + } + + break; + } + case 34: + { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder subBuilder = + null; + if (rowsCase_ == 4) { + subBuilder = + ((com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_) + .toBuilder(); + } + rows_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom( + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_); + rows_ = subBuilder.buildPartial(); + } + rowsCase_ = 4; + break; + } + case 50: + { + java.lang.String s = input.readStringRequireUtf8(); + + traceId_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.class, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.Builder.class); + } + + public interface ProtoDataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Proto schema used to serialize the data.  This value only needs to be
+     * provided as part of the first request on a gRPC network connection,
+     * and will be ignored for subsequent requests on the connection.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + boolean hasWriterSchema(); + /** + * + * + *
+     * Proto schema used to serialize the data.  This value only needs to be
+     * provided as part of the first request on a gRPC network connection,
+     * and will be ignored for subsequent requests on the connection.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + * + * @return The writerSchema. + */ + com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema(); + /** + * + * + *
+     * Proto schema used to serialize the data.  This value only needs to be
+     * provided as part of the first request on a gRPC network connection,
+     * and will be ignored for subsequent requests on the connection.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder getWriterSchemaOrBuilder(); + + /** + * + * + *
+     * Serialized row data in protobuf message format.
+     * Currently, the backend expects the serialized rows to adhere to
+     * proto2 semantics when appending rows, particularly with respect to
+     * how default values are encoded.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + * + * @return Whether the rows field is set. + */ + boolean hasRows(); + /** + * + * + *
+     * Serialized row data in protobuf message format.
+     * Currently, the backend expects the serialized rows to adhere to
+     * proto2 semantics when appending rows, particularly with respect to
+     * how default values are encoded.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + * + * @return The rows. + */ + com.google.cloud.bigquery.storage.v1.ProtoRows getRows(); + /** + * + * + *
+     * Serialized row data in protobuf message format.
+     * Currently, the backend expects the serialized rows to adhere to
+     * proto2 semantics when appending rows, particularly with respect to
+     * how default values are encoded.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder getRowsOrBuilder(); + } + /** + * + * + *
+   * ProtoData contains the data rows and schema when constructing append
+   * requests.
+   * 
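+   * <p>A minimal sketch, for illustration only; {@code MyRowMessage} stands in for an
+   * application's generated protobuf message class:
+   * <pre>{@code
+   * ProtoSchema schema =
+   *     ProtoSchema.newBuilder()
+   *         .setProtoDescriptor(MyRowMessage.getDescriptor().toProto())
+   *         .build();
+   * ProtoRows rows =
+   *     ProtoRows.newBuilder().addSerializedRows(myRowMessage.toByteString()).build();
+   * AppendRowsRequest.ProtoData data =
+   *     AppendRowsRequest.ProtoData.newBuilder()
+   *         .setWriterSchema(schema)
+   *         .setRows(rows)
+   *         .build();
+   * }</pre>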
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData} + */ + public static final class ProtoData extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) + ProtoDataOrBuilder { + private static final long serialVersionUID = 0L; + // Use ProtoData.newBuilder() to construct. + private ProtoData(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ProtoData() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ProtoData(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ProtoData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder subBuilder = null; + if (writerSchema_ != null) { + subBuilder = writerSchema_.toBuilder(); + } + writerSchema_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1.ProtoSchema.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(writerSchema_); + writerSchema_ = subBuilder.buildPartial(); + } + + break; + } + case 18: + { + com.google.cloud.bigquery.storage.v1.ProtoRows.Builder subBuilder = null; + if (rows_ != null) { + subBuilder = rows_.toBuilder(); + } + rows_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1.ProtoRows.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(rows_); + rows_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.class, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder.class); + } + + public static final int WRITER_SCHEMA_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1.ProtoSchema writerSchema_; + /** + * + * + *
+     * Proto schema used to serialize the data.  This value only needs to be
+     * provided as part of the first request on a gRPC network connection,
+     * and will be ignored for subsequent requests on the connection.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + @java.lang.Override + public boolean hasWriterSchema() { + return writerSchema_ != null; + } + /** + * + * + *
+     * Proto schema used to serialize the data.  This value only needs to be
+     * provided as part of the first request on a gRPC network connection,
+     * and will be ignored for subsequent requests on the connection.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + * + * @return The writerSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema() { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoSchema.getDefaultInstance() + : writerSchema_; + } + /** + * + * + *
+     * Proto schema used to serialize the data.  This value only needs to be
+     * provided as part of the first request on a gRPC network connection,
+     * and will be ignored for subsequent requests on the connection.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder getWriterSchemaOrBuilder() { + return getWriterSchema(); + } + + public static final int ROWS_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1.ProtoRows rows_; + /** + * + * + *
+     * Serialized row data in protobuf message format.
+     * Currently, the backend expects the serialized rows to adhere to
+     * proto2 semantics when appending rows, particularly with respect to
+     * how default values are encoded.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + * + * @return Whether the rows field is set. + */ + @java.lang.Override + public boolean hasRows() { + return rows_ != null; + } + /** + * + * + *
+     * Serialized row data in protobuf message format.
+     * Currently, the backend expects the serialized rows to adhere to
+     * proto2 semantics when appending rows, particularly with respect to
+     * how default values are encoded.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + * + * @return The rows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoRows getRows() { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoRows.getDefaultInstance() + : rows_; + } + /** + * + * + *
+     * Serialized row data in protobuf message format.
+     * Currently, the backend expects the serialized rows to adhere to
+     * proto2 semantics when appending rows, particularly with respect to
+     * how default values are encoded.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder getRowsOrBuilder() { + return getRows(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasWriterSchema()) { + if (!getWriterSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (writerSchema_ != null) { + output.writeMessage(1, getWriterSchema()); + } + if (rows_ != null) { + output.writeMessage(2, getRows()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (writerSchema_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getWriterSchema()); + } + if (rows_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getRows()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData other = + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) obj; + + if (hasWriterSchema() != other.hasWriterSchema()) return false; + if (hasWriterSchema()) { + if (!getWriterSchema().equals(other.getWriterSchema())) return false; + } + if (hasRows() != other.hasRows()) return false; + if (hasRows()) { + if (!getRows().equals(other.getRows())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasWriterSchema()) { + hash = (37 * hash) + WRITER_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getWriterSchema().hashCode(); + } + if (hasRows()) { + hash = (37 * hash) + ROWS_FIELD_NUMBER; + hash = (53 * hash) + getRows().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * ProtoData contains the data rows and schema when constructing append
+     * requests.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.class, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (writerSchemaBuilder_ == null) { + writerSchema_ = null; + } else { + writerSchema_ = null; + writerSchemaBuilder_ = null; + } + if (rowsBuilder_ == null) { + rows_ = null; + } else { + rows_ = null; + rowsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData build() { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData buildPartial() { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData result = + new com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData(this); + if (writerSchemaBuilder_ == null) { + result.writerSchema_ = writerSchema_; + } else { + result.writerSchema_ = writerSchemaBuilder_.build(); + } + if (rowsBuilder_ == null) { + result.rows_ = rows_; + } else { + result.rows_ = rowsBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder 
clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData other) { + if (other + == com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + .getDefaultInstance()) return this; + if (other.hasWriterSchema()) { + mergeWriterSchema(other.getWriterSchema()); + } + if (other.hasRows()) { + mergeRows(other.getRows()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (hasWriterSchema()) { + if (!getWriterSchema().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.cloud.bigquery.storage.v1.ProtoSchema writerSchema_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ProtoSchema, + com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder, + com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder> + writerSchemaBuilder_; + /** + * + * + *
+       * Proto schema used to serialize the data.  This value only needs to be
+       * provided as part of the first request on a gRPC network connection,
+       * and will be ignored for subsequent requests on the connection.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + public boolean hasWriterSchema() { + return writerSchemaBuilder_ != null || writerSchema_ != null; + } + /** + * + * + *
+       * Proto schema used to serialize the data.  This value only needs to be
+       * provided as part of the first request on a gRPC network connection,
+       * and will be ignored for subsequent requests on the connection.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + * + * @return The writerSchema. + */ + public com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema() { + if (writerSchemaBuilder_ == null) { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoSchema.getDefaultInstance() + : writerSchema_; + } else { + return writerSchemaBuilder_.getMessage(); + } + } + /** + * + * + *
+       * Proto schema used to serialize the data.  This value only needs to be
+       * provided as part of the first request on a gRPC network connection,
+       * and will be ignored for subsequent requests on the connection.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + public Builder setWriterSchema(com.google.cloud.bigquery.storage.v1.ProtoSchema value) { + if (writerSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writerSchema_ = value; + onChanged(); + } else { + writerSchemaBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * Proto schema used to serialize the data.  This value only needs to be
+       * provided as part of the first request on a gRPC network connection,
+       * and will be ignored for subsequent requests on the connection.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + public Builder setWriterSchema( + com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder builderForValue) { + if (writerSchemaBuilder_ == null) { + writerSchema_ = builderForValue.build(); + onChanged(); + } else { + writerSchemaBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * Proto schema used to serialize the data.  This value only needs to be
+       * provided as part of the first request on a gRPC network connection,
+       * and will be ignored for subsequent requests on the connection.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + public Builder mergeWriterSchema(com.google.cloud.bigquery.storage.v1.ProtoSchema value) { + if (writerSchemaBuilder_ == null) { + if (writerSchema_ != null) { + writerSchema_ = + com.google.cloud.bigquery.storage.v1.ProtoSchema.newBuilder(writerSchema_) + .mergeFrom(value) + .buildPartial(); + } else { + writerSchema_ = value; + } + onChanged(); + } else { + writerSchemaBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * Proto schema used to serialize the data.  This value only needs to be
+       * provided as part of the first request on a gRPC network connection,
+       * and will be ignored for subsequent requests on the connection.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + public Builder clearWriterSchema() { + if (writerSchemaBuilder_ == null) { + writerSchema_ = null; + onChanged(); + } else { + writerSchema_ = null; + writerSchemaBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * Proto schema used to serialize the data.  This value only needs to be
+       * provided as part of the first request on a gRPC network connection,
+       * and will be ignored for subsequent requests on the connection.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + public com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder getWriterSchemaBuilder() { + + onChanged(); + return getWriterSchemaFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * Proto schema used to serialize the data.  This value only needs to be
+       * provided as part of the first request on a gRPC network connection,
+       * and will be ignored for subsequent requests on the connection.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + public com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder getWriterSchemaOrBuilder() { + if (writerSchemaBuilder_ != null) { + return writerSchemaBuilder_.getMessageOrBuilder(); + } else { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoSchema.getDefaultInstance() + : writerSchema_; + } + } + /** + * + * + *
+       * Proto schema used to serialize the data.  This value only needs to be
+       * provided as part of the first request on a gRPC network connection,
+       * and will be ignored for subsequent requests on the connection.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ProtoSchema, + com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder, + com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder> + getWriterSchemaFieldBuilder() { + if (writerSchemaBuilder_ == null) { + writerSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ProtoSchema, + com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder, + com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder>( + getWriterSchema(), getParentForChildren(), isClean()); + writerSchema_ = null; + } + return writerSchemaBuilder_; + } + + private com.google.cloud.bigquery.storage.v1.ProtoRows rows_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ProtoRows, + com.google.cloud.bigquery.storage.v1.ProtoRows.Builder, + com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder> + rowsBuilder_; + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + * + * @return Whether the rows field is set. + */ + public boolean hasRows() { + return rowsBuilder_ != null || rows_ != null; + } + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + * + * @return The rows. + */ + public com.google.cloud.bigquery.storage.v1.ProtoRows getRows() { + if (rowsBuilder_ == null) { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoRows.getDefaultInstance() + : rows_; + } else { + return rowsBuilder_.getMessage(); + } + } + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + public Builder setRows(com.google.cloud.bigquery.storage.v1.ProtoRows value) { + if (rowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + rowsBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + public Builder setRows( + com.google.cloud.bigquery.storage.v1.ProtoRows.Builder builderForValue) { + if (rowsBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + rowsBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + public Builder mergeRows(com.google.cloud.bigquery.storage.v1.ProtoRows value) { + if (rowsBuilder_ == null) { + if (rows_ != null) { + rows_ = + com.google.cloud.bigquery.storage.v1.ProtoRows.newBuilder(rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + rowsBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + public Builder clearRows() { + if (rowsBuilder_ == null) { + rows_ = null; + onChanged(); + } else { + rows_ = null; + rowsBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + public com.google.cloud.bigquery.storage.v1.ProtoRows.Builder getRowsBuilder() { + + onChanged(); + return getRowsFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + public com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder getRowsOrBuilder() { + if (rowsBuilder_ != null) { + return rowsBuilder_.getMessageOrBuilder(); + } else { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoRows.getDefaultInstance() + : rows_; + } + } + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ProtoRows, + com.google.cloud.bigquery.storage.v1.ProtoRows.Builder, + com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder> + getRowsFieldBuilder() { + if (rowsBuilder_ == null) { + rowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ProtoRows, + com.google.cloud.bigquery.storage.v1.ProtoRows.Builder, + com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder>( + getRows(), getParentForChildren(), isClean()); + rows_ = null; + } + return rowsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) + private static final com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData(); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProtoData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ProtoData(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int rowsCase_ = 0; + private java.lang.Object rows_; + + public enum RowsCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + PROTO_ROWS(4), + ROWS_NOT_SET(0); + private final int value; + + private RowsCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static RowsCase valueOf(int value) { + return forNumber(value); + } + + public static RowsCase forNumber(int value) { + switch (value) { + case 4: + return PROTO_ROWS; + case 0: + return ROWS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + public static final int WRITE_STREAM_FIELD_NUMBER = 1; + private volatile java.lang.Object writeStream_; + /** + * + * + *
+   * Required. The write_stream identifies the target of the append operation, and only
+   * needs to be specified as part of the first request on the gRPC connection.
+   * If provided for subsequent requests, it must match the value of the first
+   * request.
+   * For explicitly created write streams, the format is:
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+   * For the special default stream, the format is:
+   * `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + @java.lang.Override + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The write_stream identifies the target of the append operation, and only
+   * needs to be specified as part of the first request on the gRPC connection.
+   * If provided for subsequent requests, it must match the value of the first
+   * request.
+   * For explicitly created write streams, the format is:
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+   * For the special default stream, the format is:
+   * `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + @java.lang.Override + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OFFSET_FIELD_NUMBER = 2; + private com.google.protobuf.Int64Value offset_; + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the same
+   * as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + @java.lang.Override + public boolean hasOffset() { + return offset_ != null; + } + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the same
+   * as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + @java.lang.Override + public com.google.protobuf.Int64Value getOffset() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the same
+   * as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + @java.lang.Override + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + return getOffset(); + } + + public static final int PROTO_ROWS_FIELD_NUMBER = 4; + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return Whether the protoRows field is set. + */ + @java.lang.Override + public boolean hasProtoRows() { + return rowsCase_ == 4; + } + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return The protoRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData getProtoRows() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.getDefaultInstance(); + } + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder + getProtoRowsOrBuilder() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.getDefaultInstance(); + } + + public static final int TRACE_ID_FIELD_NUMBER = 6; + private volatile java.lang.Object traceId_; + /** + * + * + *
+   * Id set by the client to annotate its identity. Only the setting from the
+   * initial request is respected.
+   * 
+ * + * string trace_id = 6; + * + * @return The traceId. + */ + @java.lang.Override + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } + } + /** + * + * + *
+   * Id set by the client to annotate its identity. Only the setting from the
+   * initial request is respected.
+   * 
+ * + * string trace_id = 6; + * + * @return The bytes for traceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasProtoRows()) { + if (!getProtoRows().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getWriteStreamBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, writeStream_); + } + if (offset_ != null) { + output.writeMessage(2, getOffset()); + } + if (rowsCase_ == 4) { + output.writeMessage( + 4, (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_); + } + if (!getTraceIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, traceId_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getWriteStreamBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, writeStream_); + } + if (offset_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getOffset()); + } + if (rowsCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_); + } + if (!getTraceIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, traceId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.AppendRowsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.AppendRowsRequest other = + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest) obj; + + if (!getWriteStream().equals(other.getWriteStream())) return false; + if (hasOffset() != other.hasOffset()) return false; + if (hasOffset()) { + if (!getOffset().equals(other.getOffset())) return false; + } + if (!getTraceId().equals(other.getTraceId())) return false; + if (!getRowsCase().equals(other.getRowsCase())) return false; + switch (rowsCase_) { + case 4: + if (!getProtoRows().equals(other.getProtoRows())) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getWriteStream().hashCode(); + if (hasOffset()) { + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + getOffset().hashCode(); + } + hash = (37 * hash) + 
TRACE_ID_FIELD_NUMBER; + hash = (53 * hash) + getTraceId().hashCode(); + switch (rowsCase_) { + case 4: + hash = (37 * hash) + PROTO_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getProtoRows().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + 
@java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request message for `AppendRows`.
+   * Because AppendRows is a bidirectional streaming RPC, certain parts of the
+   * AppendRowsRequest need only be specified for the first request sent each
+   * time the gRPC network connection is opened or reopened.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.AppendRowsRequest) + com.google.cloud.bigquery.storage.v1.AppendRowsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.class, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.AppendRowsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + writeStream_ = ""; + + if (offsetBuilder_ == null) { + offset_ = null; + } else { + offset_ = null; + offsetBuilder_ = null; + } + traceId_ = ""; + + rowsCase_ = 0; + rows_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest build() { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest result = + new com.google.cloud.bigquery.storage.v1.AppendRowsRequest(this); + result.writeStream_ = writeStream_; + if (offsetBuilder_ == null) { + result.offset_ = offset_; + } else { + result.offset_ = offsetBuilder_.build(); + } + if (rowsCase_ == 4) { + if (protoRowsBuilder_ == null) { + result.rows_ = rows_; + } else { + result.rows_ = protoRowsBuilder_.build(); + } + } + result.traceId_ = traceId_; + result.rowsCase_ = rowsCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public 
Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.AppendRowsRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.AppendRowsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.AppendRowsRequest other) { + if (other == com.google.cloud.bigquery.storage.v1.AppendRowsRequest.getDefaultInstance()) + return this; + if (!other.getWriteStream().isEmpty()) { + writeStream_ = other.writeStream_; + onChanged(); + } + if (other.hasOffset()) { + mergeOffset(other.getOffset()); + } + if (!other.getTraceId().isEmpty()) { + traceId_ = other.traceId_; + onChanged(); + } + switch (other.getRowsCase()) { + case PROTO_ROWS: + { + mergeProtoRows(other.getProtoRows()); + break; + } + case ROWS_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (hasProtoRows()) { + if (!getProtoRows().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int rowsCase_ = 0; + private java.lang.Object rows_; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + public Builder clearRows() { + rowsCase_ = 0; + rows_ = null; + onChanged(); + return this; + } + + private java.lang.Object writeStream_ = ""; + /** + * + * + *
+     * Required. The write_stream identifies the target of the append operation, and only
+     * needs to be specified as part of the first request on the gRPC connection.
+     * If provided for subsequent requests, it must match the value of the first
+     * request.
+     * For explicitly created write streams, the format is:
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+     * For the special default stream, the format is:
+     * `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The write_stream identifies the target of the append operation, and only
+     * needs to be specified as part of the first request on the gRPC connection.
+     * If provided for subsequent requests, it must match the value of the first
+     * request.
+     * For explicitly created write streams, the format is:
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+     * For the special default stream, the format is:
+     * `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The write_stream identifies the target of the append operation, and only
+     * needs to be specified as part of the first request on the gRPC connection.
+     * If provided for subsequent requests, it must match the value of the first
+     * request.
+     * For explicitly created write streams, the format is:
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+     * For the special default stream, the format is:
+     * `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStream(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + writeStream_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The write_stream identifies the target of the append operation, and only
+     * needs to be specified as part of the first request on the gRPC connection.
+     * If provided for subsequent requests, it must match the value of the first
+     * request.
+     * For explicitly created write streams, the format is:
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+     * For the special default stream, the format is:
+     * `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearWriteStream() { + + writeStream_ = getDefaultInstance().getWriteStream(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The write_stream identifies the target of the append operation, and only
+     * needs to be specified as part of the first request on the gRPC connection.
+     * If provided for subsequent requests, it must match the value of the first
+     * request.
+     * For explicitly created write streams, the format is:
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+     * For the special default stream, the format is:
+     * `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + writeStream_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.Int64Value offset_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + offsetBuilder_; + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + public boolean hasOffset() { + return offsetBuilder_ != null || offset_ != null; + } + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + public com.google.protobuf.Int64Value getOffset() { + if (offsetBuilder_ == null) { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } else { + return offsetBuilder_.getMessage(); + } + } + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder setOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + offset_ = value; + onChanged(); + } else { + offsetBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder setOffset(com.google.protobuf.Int64Value.Builder builderForValue) { + if (offsetBuilder_ == null) { + offset_ = builderForValue.build(); + onChanged(); + } else { + offsetBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder mergeOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (offset_ != null) { + offset_ = + com.google.protobuf.Int64Value.newBuilder(offset_).mergeFrom(value).buildPartial(); + } else { + offset_ = value; + } + onChanged(); + } else { + offsetBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder clearOffset() { + if (offsetBuilder_ == null) { + offset_ = null; + onChanged(); + } else { + offset_ = null; + offsetBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public com.google.protobuf.Int64Value.Builder getOffsetBuilder() { + + onChanged(); + return getOffsetFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + if (offsetBuilder_ != null) { + return offsetBuilder_.getMessageOrBuilder(); + } else { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + } + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + getOffsetFieldBuilder() { + if (offsetBuilder_ == null) { + offsetBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder>( + getOffset(), getParentForChildren(), isClean()); + offset_ = null; + } + return offsetBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder> + protoRowsBuilder_; + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return Whether the protoRows field is set. + */ + @java.lang.Override + public boolean hasProtoRows() { + return rowsCase_ == 4; + } + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return The protoRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData getProtoRows() { + if (protoRowsBuilder_ == null) { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } else { + if (rowsCase_ == 4) { + return protoRowsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + } + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + public Builder setProtoRows( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData value) { + if (protoRowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + protoRowsBuilder_.setMessage(value); + } + rowsCase_ = 4; + return this; + } + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + public Builder setProtoRows( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder builderForValue) { + if (protoRowsBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + protoRowsBuilder_.setMessage(builderForValue.build()); + } + rowsCase_ = 4; + return this; + } + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + public Builder mergeProtoRows( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData value) { + if (protoRowsBuilder_ == null) { + if (rowsCase_ == 4 + && rows_ + != com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + .getDefaultInstance()) { + rows_ = + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.newBuilder( + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + if (rowsCase_ == 4) { + protoRowsBuilder_.mergeFrom(value); + } + protoRowsBuilder_.setMessage(value); + } + rowsCase_ = 4; + return this; + } + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + public Builder clearProtoRows() { + if (protoRowsBuilder_ == null) { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + onChanged(); + } + } else { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + } + protoRowsBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder + getProtoRowsBuilder() { + return getProtoRowsFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder + getProtoRowsOrBuilder() { + if ((rowsCase_ == 4) && (protoRowsBuilder_ != null)) { + return protoRowsBuilder_.getMessageOrBuilder(); + } else { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + } + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder> + getProtoRowsFieldBuilder() { + if (protoRowsBuilder_ == null) { + if (!(rowsCase_ == 4)) { + rows_ = + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.getDefaultInstance(); + } + protoRowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder>( + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_, + getParentForChildren(), + isClean()); + rows_ = null; + } + rowsCase_ = 4; + onChanged(); + ; + return protoRowsBuilder_; + } + + private java.lang.Object traceId_ = ""; + /** + * + * + *
+     * Id set by the client to annotate its identity. Only the setting from the
+     * initial request is respected.
+     * 
+ * + * string trace_id = 6; + * + * @return The traceId. + */ + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Id set by the client to annotate its identity. Only the setting from the
+     * initial request is respected.
+     * 
+ * + * string trace_id = 6; + * + * @return The bytes for traceId. + */ + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Id set by the client to annotate its identity. Only the setting from the
+     * initial request is respected.
+     * 
+ * + * string trace_id = 6; + * + * @param value The traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + traceId_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Id set by the client to annotate its identity. Only the setting from the
+     * initial request is respected.
+     * 
+ * + * string trace_id = 6; + * + * @return This builder for chaining. + */ + public Builder clearTraceId() { + + traceId_ = getDefaultInstance().getTraceId(); + onChanged(); + return this; + } + /** + * + * + *
+     * Id set by the client to annotate its identity. Only the setting from the
+     * initial request is respected.
+     * 
+ * + * string trace_id = 6; + * + * @param value The bytes for traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + traceId_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.AppendRowsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.AppendRowsRequest) + private static final com.google.cloud.bigquery.storage.v1.AppendRowsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.AppendRowsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AppendRowsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AppendRowsRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java new file mode 100644 index 0000000000..d25b3cc37f --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java @@ -0,0 +1,176 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface AppendRowsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.AppendRowsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The write_stream identifies the target of the append operation, and only
+   * needs to be specified as part of the first request on the gRPC connection.
+   * If provided for subsequent requests, it must match the value of the first
+   * request.
+   * For explicitly created write streams, the format is:
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+   * For the special default stream, the format is:
+   * `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + java.lang.String getWriteStream(); + /** + * + * + *
+   * Required. The write_stream identifies the target of the append operation, and only
+   * needs to be specified as part of the first request on the gRPC connection.
+   * If provided for subsequent requests, it must match the value of the first
+   * request.
+   * For explicitly created write streams, the format is:
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+   * For the special default stream, the format is:
+   * `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + com.google.protobuf.ByteString getWriteStreamBytes(); + + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the
+   * same as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + boolean hasOffset(); + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the
+   * same as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
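+   *
+   * 
A sketch of an offset-checked append at offset 0; {@code Int64Value} is the
+   * {@code com.google.protobuf} wrapper type:
+   *
+   * 
{@code
+   * AppendRowsRequest request =
+   *     AppendRowsRequest.newBuilder()
+   *         .setOffset(Int64Value.of(0))
+   *         .build();
+   * }
+ 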
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + com.google.protobuf.Int64Value getOffset(); + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the
+   * same as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder(); + + /** + * + * + *
+   * Rows in proto format.
+   * 
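+   *
+   * 
A sketch of populating this field; {@code writerSchema} and
+   * {@code serializedRow} are assumed to have been prepared by the caller:
+   *
+   * 
{@code
+   * AppendRowsRequest.ProtoData protoData =
+   *     AppendRowsRequest.ProtoData.newBuilder()
+   *         .setWriterSchema(writerSchema)
+   *         .setRows(ProtoRows.newBuilder().addSerializedRows(serializedRow).build())
+   *         .build();
+   * }
+ 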
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return Whether the protoRows field is set. + */ + boolean hasProtoRows(); + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return The protoRows. + */ + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData getProtoRows(); + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder getProtoRowsOrBuilder(); + + /** + * + * + *
+   * ID set by the client to annotate its identity. Only the setting on the
+   * initial request is respected.
+   * 
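+   *
+   * 
For example, a client might identify itself on the initial request (the
+   * value shown is arbitrary):
+   *
+   * 
{@code
+   * AppendRowsRequest request =
+   *     AppendRowsRequest.newBuilder().setTraceId("my-writer:1.0").build();
+   * }
+ 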
+ * + * string trace_id = 6; + * + * @return The traceId. + */ + java.lang.String getTraceId(); + /** + * + * + *
+   * ID set by the client to annotate its identity. Only the setting on the
+   * initial request is respected.
+   * 
+ * + * string trace_id = 6; + * + * @return The bytes for traceId. + */ + com.google.protobuf.ByteString getTraceIdBytes(); + + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.RowsCase getRowsCase(); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java new file mode 100644 index 0000000000..62e2520af3 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java @@ -0,0 +1,2403 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Response message for `AppendRows`.
+ * 
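+ *
+ * 
A minimal sketch of handling a response received from the {@code AppendRows}
+ * stream ({@code response} is assumed):
+ *
+ * 
{@code
+ * switch (response.getResponseCase()) {
+ *   case APPEND_RESULT:
+ *     AppendRowsResponse.AppendResult result = response.getAppendResult();
+ *     break;
+ *   case ERROR:
+ *     com.google.rpc.Status error = response.getError();
+ *     break;
+ *   default:
+ *     break;
+ * }
+ * }
+ 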
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsResponse} + */ +public final class AppendRowsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.AppendRowsResponse) + AppendRowsResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use AppendRowsResponse.newBuilder() to construct. + private AppendRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AppendRowsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AppendRowsResponse(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private AppendRowsResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder + subBuilder = null; + if (responseCase_ == 1) { + subBuilder = + ((com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + response_) + .toBuilder(); + } + response_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom( + (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + response_); + response_ = subBuilder.buildPartial(); + } + responseCase_ = 1; + break; + } + case 18: + { + com.google.rpc.Status.Builder subBuilder = null; + if (responseCase_ == 2) { + subBuilder = ((com.google.rpc.Status) response_).toBuilder(); + } + response_ = input.readMessage(com.google.rpc.Status.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.rpc.Status) response_); + response_ = subBuilder.buildPartial(); + } + responseCase_ = 2; + break; + } + case 26: + { + com.google.cloud.bigquery.storage.v1.TableSchema.Builder subBuilder = null; + if (updatedSchema_ != null) { + subBuilder = updatedSchema_.toBuilder(); + } + updatedSchema_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1.TableSchema.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(updatedSchema_); + updatedSchema_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + 
.internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.class, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.Builder.class); + } + + public interface AppendResultOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set if appending using the default stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return Whether the offset field is set. + */ + boolean hasOffset(); + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set if appending using the default stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return The offset. + */ + com.google.protobuf.Int64Value getOffset(); + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set if appending using the default stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder(); + } + /** + * + * + *
+   * AppendResult is returned for successful append requests.
+   * 
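+   *
+   * 
A sketch of reading the committed offset, assuming {@code result} came
+   * from a response to an offset-based append:
+   *
+   * 
{@code
+   * if (result.hasOffset()) {
+   *   long committedOffset = result.getOffset().getValue();
+   * }
+   * }
+ 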
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult} + */ + public static final class AppendResult extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + AppendResultOrBuilder { + private static final long serialVersionUID = 0L; + // Use AppendResult.newBuilder() to construct. + private AppendResult(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AppendResult() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AppendResult(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private AppendResult( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.protobuf.Int64Value.Builder subBuilder = null; + if (offset_ != null) { + subBuilder = offset_.toBuilder(); + } + offset_ = + input.readMessage(com.google.protobuf.Int64Value.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(offset_); + offset_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.class, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder.class); + } + + public static final int OFFSET_FIELD_NUMBER = 1; + private com.google.protobuf.Int64Value offset_; + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set if appending using the default stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return Whether the offset field is set. + */ + @java.lang.Override + public boolean hasOffset() { + return offset_ != null; + } + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set if appending using the default stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return The offset. + */ + @java.lang.Override + public com.google.protobuf.Int64Value getOffset() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set if appending using the default stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + @java.lang.Override + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + return getOffset(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (offset_ != null) { + output.writeMessage(1, getOffset()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (offset_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getOffset()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult other = + (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) obj; + + if (hasOffset() != other.hasOffset()) return false; + if (hasOffset()) { + if (!getOffset().equals(other.getOffset())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasOffset()) { + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + getOffset().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * AppendResult is returned for successful append requests.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.class, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (offsetBuilder_ == null) { + offset_ = null; + } else { + offset_ = null; + offsetBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult build() { + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult buildPartial() { + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult result = + new com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult(this); + if (offsetBuilder_ == null) { + result.offset_ = offset_; + } else { + result.offset_ = offsetBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return 
super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult other) { + if (other + == com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance()) return this; + if (other.hasOffset()) { + mergeOffset(other.getOffset()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.protobuf.Int64Value offset_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + offsetBuilder_; + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using the default stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return Whether the offset field is set. + */ + public boolean hasOffset() { + return offsetBuilder_ != null || offset_ != null; + } + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using the default stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return The offset. + */ + public com.google.protobuf.Int64Value getOffset() { + if (offsetBuilder_ == null) { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } else { + return offsetBuilder_.getMessage(); + } + } + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using the default stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public Builder setOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + offset_ = value; + onChanged(); + } else { + offsetBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using the default stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public Builder setOffset(com.google.protobuf.Int64Value.Builder builderForValue) { + if (offsetBuilder_ == null) { + offset_ = builderForValue.build(); + onChanged(); + } else { + offsetBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using the default stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public Builder mergeOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (offset_ != null) { + offset_ = + com.google.protobuf.Int64Value.newBuilder(offset_).mergeFrom(value).buildPartial(); + } else { + offset_ = value; + } + onChanged(); + } else { + offsetBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using the default stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public Builder clearOffset() { + if (offsetBuilder_ == null) { + offset_ = null; + onChanged(); + } else { + offset_ = null; + offsetBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using the default stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public com.google.protobuf.Int64Value.Builder getOffsetBuilder() { + + onChanged(); + return getOffsetFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using the default stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + if (offsetBuilder_ != null) { + return offsetBuilder_.getMessageOrBuilder(); + } else { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + } + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using the default stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + getOffsetFieldBuilder() { + if (offsetBuilder_ == null) { + offsetBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder>( + getOffset(), getParentForChildren(), isClean()); + offset_ = null; + } + return offsetBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + private static final com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult(); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AppendResult parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AppendResult(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int responseCase_ = 0; + private java.lang.Object response_; + + public enum ResponseCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + APPEND_RESULT(1), + ERROR(2), + RESPONSE_NOT_SET(0); + private final int value; + + private ResponseCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ResponseCase valueOf(int value) { + return forNumber(value); + } + + public static ResponseCase forNumber(int value) { + switch (value) { + case 1: + return APPEND_RESULT; + case 2: + return ERROR; + case 0: + return RESPONSE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ResponseCase getResponseCase() { + return ResponseCase.forNumber(responseCase_); + } + + public static final int APPEND_RESULT_FIELD_NUMBER = 1; + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return Whether the appendResult field is set. + */ + @java.lang.Override + public boolean hasAppendResult() { + return responseCase_ == 1; + } + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return The appendResult. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult getAppendResult() { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder + getAppendResultOrBuilder() { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + + public static final int ERROR_FIELD_NUMBER = 2; + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   * Additional information about error signalling:
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   * INTERNAL: Indicates server-side error(s) that can be retried.
+   * 
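+   *
+   * 
A sketch of branching on the documented codes via the canonical code
+   * numbers in {@code com.google.rpc.Code}:
+   *
+   * 
{@code
+   * com.google.rpc.Status error = response.getError();
+   * if (error.getCode() == com.google.rpc.Code.ALREADY_EXISTS_VALUE) {
+   *   // The offset was already written; typically safe to ignore on retry.
+   * } else if (error.getCode() == com.google.rpc.Code.INTERNAL_VALUE) {
+   *   // Retryable server-side error.
+   * }
+   * }
+ 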
+ * + * .google.rpc.Status error = 2; + * + * @return Whether the error field is set. + */ + @java.lang.Override + public boolean hasError() { + return responseCase_ == 2; + } + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   * Additional information about error signalling:
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   * INTERNAL: Indicates server-side error(s) that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + * + * @return The error. + */ + @java.lang.Override + public com.google.rpc.Status getError() { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   * Additional information about error signalling:
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   * INTERNAL: Indicates server-side error(s) that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + */ + @java.lang.Override + public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } + + public static final int UPDATED_SCHEMA_FIELD_NUMBER = 3; + private com.google.cloud.bigquery.storage.v1.TableSchema updatedSchema_; + /** + * + * + *
+   * If the backend detects a schema update, it is passed back to the user so
+   * that the user can input messages of the new type. It will be empty when no
+   * schema updates have occurred.
+   * 
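+   *
+   * 
A sketch of detecting a mid-stream schema change; rebuilding the writer
+   * schema is left to the caller:
+   *
+   * 
{@code
+   * if (response.hasUpdatedSchema()) {
+   *   TableSchema newSchema = response.getUpdatedSchema();
+   *   // Regenerate the writer schema before appending rows with new fields.
+   * }
+   * }
+ 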
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + * + * @return Whether the updatedSchema field is set. + */ + @java.lang.Override + public boolean hasUpdatedSchema() { + return updatedSchema_ != null; + } + /** + * + * + *
+   * If the backend detects a schema update, it is passed back to the user so
+   * that the user can input messages of the new type. It will be empty when no
+   * schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + * + * @return The updatedSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchema getUpdatedSchema() { + return updatedSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : updatedSchema_; + } + /** + * + * + *
+   * If the backend detects a schema update, it is passed back to the user so
+   * that the user can input messages of the new type. It will be empty when no
+   * schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getUpdatedSchemaOrBuilder() { + return getUpdatedSchema(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (responseCase_ == 1) { + output.writeMessage( + 1, (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_); + } + if (responseCase_ == 2) { + output.writeMessage(2, (com.google.rpc.Status) response_); + } + if (updatedSchema_ != null) { + output.writeMessage(3, getUpdatedSchema()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (responseCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_); + } + if (responseCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.rpc.Status) response_); + } + if (updatedSchema_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getUpdatedSchema()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.AppendRowsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.AppendRowsResponse other = + (com.google.cloud.bigquery.storage.v1.AppendRowsResponse) obj; + + if (hasUpdatedSchema() != other.hasUpdatedSchema()) return false; + if (hasUpdatedSchema()) { + if (!getUpdatedSchema().equals(other.getUpdatedSchema())) return false; + } + if (!getResponseCase().equals(other.getResponseCase())) return false; + switch (responseCase_) { + case 1: + if (!getAppendResult().equals(other.getAppendResult())) return false; + break; + case 2: + if (!getError().equals(other.getError())) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasUpdatedSchema()) { + hash = (37 * hash) + UPDATED_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getUpdatedSchema().hashCode(); + } + switch (responseCase_) { + case 1: + hash = (37 * hash) + APPEND_RESULT_FIELD_NUMBER; + hash = (53 * hash) + getAppendResult().hashCode(); + break; + case 2: + hash = (37 * hash) + ERROR_FIELD_NUMBER; + hash = (53 * hash) + getError().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Response message for `AppendRows`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.AppendRowsResponse) + com.google.cloud.bigquery.storage.v1.AppendRowsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.class, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.AppendRowsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (updatedSchemaBuilder_ == null) { + updatedSchema_ = null; + } else { + updatedSchema_ = null; + updatedSchemaBuilder_ = null; + } + responseCase_ = 0; + response_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse build() { + com.google.cloud.bigquery.storage.v1.AppendRowsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1.AppendRowsResponse result = + new com.google.cloud.bigquery.storage.v1.AppendRowsResponse(this); + if (responseCase_ == 1) { + if (appendResultBuilder_ == null) { + result.response_ = response_; + } else { + result.response_ = appendResultBuilder_.build(); + } + } + if (responseCase_ == 2) { + if (errorBuilder_ == null) { + result.response_ = response_; + } else { + result.response_ = errorBuilder_.build(); + } + } + if (updatedSchemaBuilder_ == null) { + result.updatedSchema_ = updatedSchema_; + } else { + result.updatedSchema_ = updatedSchemaBuilder_.build(); + } + result.responseCase_ = responseCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + 
@java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.AppendRowsResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.AppendRowsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.AppendRowsResponse other) { + if (other == com.google.cloud.bigquery.storage.v1.AppendRowsResponse.getDefaultInstance()) + return this; + if (other.hasUpdatedSchema()) { + mergeUpdatedSchema(other.getUpdatedSchema()); + } + switch (other.getResponseCase()) { + case APPEND_RESULT: + { + mergeAppendResult(other.getAppendResult()); + break; + } + case ERROR: + { + mergeError(other.getError()); + break; + } + case RESPONSE_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.AppendRowsResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1.AppendRowsResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int responseCase_ = 0; + private java.lang.Object response_; + + public ResponseCase getResponseCase() { + return ResponseCase.forNumber(responseCase_); + } + + public Builder clearResponse() { + responseCase_ = 0; + response_ = null; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder> + appendResultBuilder_; + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return Whether the appendResult field is set. + */ + @java.lang.Override + public boolean hasAppendResult() { + return responseCase_ == 1; + } + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return The appendResult. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult getAppendResult() { + if (appendResultBuilder_ == null) { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } else { + if (responseCase_ == 1) { + return appendResultBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + } + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public Builder setAppendResult( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult value) { + if (appendResultBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + response_ = value; + onChanged(); + } else { + appendResultBuilder_.setMessage(value); + } + responseCase_ = 1; + return this; + } + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public Builder setAppendResult( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder + builderForValue) { + if (appendResultBuilder_ == null) { + response_ = builderForValue.build(); + onChanged(); + } else { + appendResultBuilder_.setMessage(builderForValue.build()); + } + responseCase_ = 1; + return this; + } + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public Builder mergeAppendResult( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult value) { + if (appendResultBuilder_ == null) { + if (responseCase_ == 1 + && response_ + != com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance()) { + response_ = + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.newBuilder( + (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + response_) + .mergeFrom(value) + .buildPartial(); + } else { + response_ = value; + } + onChanged(); + } else { + if (responseCase_ == 1) { + appendResultBuilder_.mergeFrom(value); + } + appendResultBuilder_.setMessage(value); + } + responseCase_ = 1; + return this; + } + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public Builder clearAppendResult() { + if (appendResultBuilder_ == null) { + if (responseCase_ == 1) { + responseCase_ = 0; + response_ = null; + onChanged(); + } + } else { + if (responseCase_ == 1) { + responseCase_ = 0; + response_ = null; + } + appendResultBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder + getAppendResultBuilder() { + return getAppendResultFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder + getAppendResultOrBuilder() { + if ((responseCase_ == 1) && (appendResultBuilder_ != null)) { + return appendResultBuilder_.getMessageOrBuilder(); + } else { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + } + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder> + getAppendResultFieldBuilder() { + if (appendResultBuilder_ == null) { + if (!(responseCase_ == 1)) { + response_ = + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + appendResultBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder>( + (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_, + getParentForChildren(), + isClean()); + response_ = null; + } + responseCase_ = 1; + onChanged(); + ; + return appendResultBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + errorBuilder_; + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates a server-side error that can be retried.
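+     *
+     * Illustrative sketch only (not part of the generated file); `response`
+     * stands for a received AppendRowsResponse:
+     *   if (response.hasError()) {
+     *     com.google.rpc.Status status = response.getError();
+     *     if (status.getCode() == com.google.rpc.Code.ALREADY_EXISTS_VALUE) {
+     *       // Duplicate offset from a retried append; safe to ignore.
+     *     }
+     *   }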
+     * 
+ * + * .google.rpc.Status error = 2; + * + * @return Whether the error field is set. + */ + @java.lang.Override + public boolean hasError() { + return responseCase_ == 2; + } + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates a server-side error that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + * + * @return The error. + */ + @java.lang.Override + public com.google.rpc.Status getError() { + if (errorBuilder_ == null) { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } else { + if (responseCase_ == 2) { + return errorBuilder_.getMessage(); + } + return com.google.rpc.Status.getDefaultInstance(); + } + } + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates a server-side error that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder setError(com.google.rpc.Status value) { + if (errorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + response_ = value; + onChanged(); + } else { + errorBuilder_.setMessage(value); + } + responseCase_ = 2; + return this; + } + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates a server-side error that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder setError(com.google.rpc.Status.Builder builderForValue) { + if (errorBuilder_ == null) { + response_ = builderForValue.build(); + onChanged(); + } else { + errorBuilder_.setMessage(builderForValue.build()); + } + responseCase_ = 2; + return this; + } + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates a server-side error that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder mergeError(com.google.rpc.Status value) { + if (errorBuilder_ == null) { + if (responseCase_ == 2 && response_ != com.google.rpc.Status.getDefaultInstance()) { + response_ = + com.google.rpc.Status.newBuilder((com.google.rpc.Status) response_) + .mergeFrom(value) + .buildPartial(); + } else { + response_ = value; + } + onChanged(); + } else { + if (responseCase_ == 2) { + errorBuilder_.mergeFrom(value); + } + errorBuilder_.setMessage(value); + } + responseCase_ = 2; + return this; + } + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates a server-side error that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder clearError() { + if (errorBuilder_ == null) { + if (responseCase_ == 2) { + responseCase_ = 0; + response_ = null; + onChanged(); + } + } else { + if (responseCase_ == 2) { + responseCase_ = 0; + response_ = null; + } + errorBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates a server-side error that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public com.google.rpc.Status.Builder getErrorBuilder() { + return getErrorFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates a server-side error that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + @java.lang.Override + public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { + if ((responseCase_ == 2) && (errorBuilder_ != null)) { + return errorBuilder_.getMessageOrBuilder(); + } else { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } + } + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates a server-side error that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + getErrorFieldBuilder() { + if (errorBuilder_ == null) { + if (!(responseCase_ == 2)) { + response_ = com.google.rpc.Status.getDefaultInstance(); + } + errorBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, + com.google.rpc.Status.Builder, + com.google.rpc.StatusOrBuilder>( + (com.google.rpc.Status) response_, getParentForChildren(), isClean()); + response_ = null; + } + responseCase_ = 2; + onChanged(); + ; + return errorBuilder_; + } + + private com.google.cloud.bigquery.storage.v1.TableSchema updatedSchema_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableSchema, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder> + updatedSchemaBuilder_; + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the user so that
+     * the user can use it to input messages of the new type. It will be empty
+     * when no schema updates have occurred.
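+     *
+     * Illustrative sketch only (not part of the generated file); `response`
+     * stands for a received AppendRowsResponse:
+     *   if (response.hasUpdatedSchema()) {
+     *     TableSchema schema = response.getUpdatedSchema();
+     *     // Regenerate the writer's row descriptor from `schema` before
+     *     // appending rows that use the new fields.
+     *   }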
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + * + * @return Whether the updatedSchema field is set. + */ + public boolean hasUpdatedSchema() { + return updatedSchemaBuilder_ != null || updatedSchema_ != null; + } + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the user so that
+     * the user can use it to input messages of the new type. It will be empty
+     * when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + * + * @return The updatedSchema. + */ + public com.google.cloud.bigquery.storage.v1.TableSchema getUpdatedSchema() { + if (updatedSchemaBuilder_ == null) { + return updatedSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : updatedSchema_; + } else { + return updatedSchemaBuilder_.getMessage(); + } + } + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the user so that
+     * the user can use it to input messages of the new type. It will be empty
+     * when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + public Builder setUpdatedSchema(com.google.cloud.bigquery.storage.v1.TableSchema value) { + if (updatedSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updatedSchema_ = value; + onChanged(); + } else { + updatedSchemaBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the user so that
+     * the user can use it to input messages of the new type. It will be empty
+     * when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + public Builder setUpdatedSchema( + com.google.cloud.bigquery.storage.v1.TableSchema.Builder builderForValue) { + if (updatedSchemaBuilder_ == null) { + updatedSchema_ = builderForValue.build(); + onChanged(); + } else { + updatedSchemaBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the user so that
+     * the user can use it to input messages of the new type. It will be empty
+     * when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + public Builder mergeUpdatedSchema(com.google.cloud.bigquery.storage.v1.TableSchema value) { + if (updatedSchemaBuilder_ == null) { + if (updatedSchema_ != null) { + updatedSchema_ = + com.google.cloud.bigquery.storage.v1.TableSchema.newBuilder(updatedSchema_) + .mergeFrom(value) + .buildPartial(); + } else { + updatedSchema_ = value; + } + onChanged(); + } else { + updatedSchemaBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the user so that
+     * the user can use it to input messages of the new type. It will be empty
+     * when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + public Builder clearUpdatedSchema() { + if (updatedSchemaBuilder_ == null) { + updatedSchema_ = null; + onChanged(); + } else { + updatedSchema_ = null; + updatedSchemaBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the user so that
+     * the user can use it to input messages of the new type. It will be empty
+     * when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + public com.google.cloud.bigquery.storage.v1.TableSchema.Builder getUpdatedSchemaBuilder() { + + onChanged(); + return getUpdatedSchemaFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the user so that
+     * the user can use it to input messages of the new type. It will be empty
+     * when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + public com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getUpdatedSchemaOrBuilder() { + if (updatedSchemaBuilder_ != null) { + return updatedSchemaBuilder_.getMessageOrBuilder(); + } else { + return updatedSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : updatedSchema_; + } + } + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the user so that
+     * the user can use it to input messages of the new type. It will be empty
+     * when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableSchema, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder> + getUpdatedSchemaFieldBuilder() { + if (updatedSchemaBuilder_ == null) { + updatedSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableSchema, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder>( + getUpdatedSchema(), getParentForChildren(), isClean()); + updatedSchema_ = null; + } + return updatedSchemaBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.AppendRowsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.AppendRowsResponse) + private static final com.google.cloud.bigquery.storage.v1.AppendRowsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.AppendRowsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AppendRowsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AppendRowsResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java new file mode 100644 index 0000000000..69a81948c0 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java @@ -0,0 +1,181 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface AppendRowsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.AppendRowsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return Whether the appendResult field is set. + */ + boolean hasAppendResult(); + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return The appendResult. + */ + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult getAppendResult(); + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder + getAppendResultOrBuilder(); + + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   * Additional information about error signalling:
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   * INTERNAL: Indicates a server-side error that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + * + * @return Whether the error field is set. + */ + boolean hasError(); + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   * Additional information about error signalling:
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   * INTERNAL: Indicates a server-side error that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + * + * @return The error. + */ + com.google.rpc.Status getError(); + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   * Additional information about error signalling:
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   * INTERNAL: Indicates a server-side error that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + */ + com.google.rpc.StatusOrBuilder getErrorOrBuilder(); + + /** + * + * + *
+   * If the backend detects a schema update, it is passed to the user so that
+   * the user can use it to input messages of the new type. It will be empty
+   * when no schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + * + * @return Whether the updatedSchema field is set. + */ + boolean hasUpdatedSchema(); + /** + * + * + *
+   * If the backend detects a schema update, it is passed to the user so that
+   * the user can use it to input messages of the new type. It will be empty
+   * when no schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + * + * @return The updatedSchema. + */ + com.google.cloud.bigquery.storage.v1.TableSchema getUpdatedSchema(); + /** + * + * + *
+   * If the backend detects a schema update, it is passed to the user so that
+   * the user can use it to input messages of the new type. It will be empty
+   * when no schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getUpdatedSchemaOrBuilder(); + + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.ResponseCase getResponseCase(); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequest.java new file mode 100644 index 0000000000..04bc6438b8 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequest.java @@ -0,0 +1,936 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for `BatchCommitWriteStreams`.
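+ *
+ * Illustrative construction only (not part of the generated file); the table
+ * path and `pendingStreamName` are placeholders:
+ *   BatchCommitWriteStreamsRequest request =
+ *       BatchCommitWriteStreamsRequest.newBuilder()
+ *           .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
+ *           .addWriteStreams(pendingStreamName)
+ *           .build();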
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest} + */ +public final class BatchCommitWriteStreamsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) + BatchCommitWriteStreamsRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use BatchCommitWriteStreamsRequest.newBuilder() to construct. + private BatchCommitWriteStreamsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCommitWriteStreamsRequest() { + parent_ = ""; + writeStreams_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCommitWriteStreamsRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private BatchCommitWriteStreamsRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + parent_ = s; + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + writeStreams_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + writeStreams_.add(s); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + writeStreams_ = writeStreams_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest.class, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + private volatile java.lang.Object parent_; + /** + * + * + *
+   * Required. Parent table that all the streams should belong to, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + * + * + *
+   * Required. Parent table that all the streams should belong to, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int WRITE_STREAMS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList writeStreams_; + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the writeStreams. + */ + public com.google.protobuf.ProtocolStringList getWriteStreamsList() { + return writeStreams_; + } + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of writeStreams. + */ + public int getWriteStreamsCount() { + return writeStreams_.size(); + } + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The writeStreams at the given index. + */ + public java.lang.String getWriteStreams(int index) { + return writeStreams_.get(index); + } + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the writeStreams at the given index. + */ + public com.google.protobuf.ByteString getWriteStreamsBytes(int index) { + return writeStreams_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getParentBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + for (int i = 0; i < writeStreams_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, writeStreams_.getRaw(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getParentBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + { + int dataSize = 0; + for (int i = 0; i < writeStreams_.size(); i++) { + dataSize += computeStringSizeNoTag(writeStreams_.getRaw(i)); + } + size += dataSize; + size += 1 * getWriteStreamsList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest other = + (com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getWriteStreamsList().equals(other.getWriteStreamsList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getWriteStreamsCount() > 0) { + hash = (37 * hash) + WRITE_STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getWriteStreamsList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request message for `BatchCommitWriteStreams`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest.class, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + parent_ = ""; + + writeStreams_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest build() { + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest result = + new com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest(this); + int from_bitField0_ = bitField0_; + result.parent_ = parent_; + if (((bitField0_ & 0x00000001) != 0)) { + writeStreams_ = writeStreams_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.writeStreams_ = writeStreams_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return 
super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + onChanged(); + } + if (!other.writeStreams_.isEmpty()) { + if (writeStreams_.isEmpty()) { + writeStreams_ = other.writeStreams_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureWriteStreamsIsMutable(); + writeStreams_.addAll(other.writeStreams_); + } + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + parent_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearParent() { + + parent_ = getDefaultInstance().getParent(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + parent_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList writeStreams_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureWriteStreamsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + writeStreams_ = new com.google.protobuf.LazyStringArrayList(writeStreams_); + bitField0_ |= 0x00000001; + } + } + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the writeStreams. + */ + public com.google.protobuf.ProtocolStringList getWriteStreamsList() { + return writeStreams_.getUnmodifiableView(); + } + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of writeStreams. + */ + public int getWriteStreamsCount() { + return writeStreams_.size(); + } + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The writeStreams at the given index. + */ + public java.lang.String getWriteStreams(int index) { + return writeStreams_.get(index); + } + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the writeStreams at the given index. + */ + public com.google.protobuf.ByteString getWriteStreamsBytes(int index) { + return writeStreams_.getByteString(index); + } + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The writeStreams to set. + * @return This builder for chaining. + */ + public Builder setWriteStreams(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWriteStreamsIsMutable(); + writeStreams_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The writeStreams to add. + * @return This builder for chaining. + */ + public Builder addWriteStreams(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWriteStreamsIsMutable(); + writeStreams_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param values The writeStreams to add. + * @return This builder for chaining. + */ + public Builder addAllWriteStreams(java.lang.Iterable values) { + ensureWriteStreamsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, writeStreams_); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearWriteStreams() { + writeStreams_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the writeStreams to add. + * @return This builder for chaining. + */ + public Builder addWriteStreamsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureWriteStreamsIsMutable(); + writeStreams_.add(value); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) + private static final com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCommitWriteStreamsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BatchCommitWriteStreamsRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequestOrBuilder.java new file mode 100644 index 0000000000..0d5eccc571 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequestOrBuilder.java @@ -0,0 +1,103 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface BatchCommitWriteStreamsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Parent table that all the streams should belong to, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
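+   *
+   * <p>For example (an illustrative name, not a real resource):
+   * {@code "projects/my-project/datasets/my_dataset/tables/my_table"}.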
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The parent. + */ + java.lang.String getParent(); + /** + * + * + *
+   * Required. Parent table that all the streams should belong to, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the writeStreams. + */ + java.util.List getWriteStreamsList(); + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of writeStreams. + */ + int getWriteStreamsCount(); + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The writeStreams at the given index. + */ + java.lang.String getWriteStreams(int index); + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the writeStreams at the given index. + */ + com.google.protobuf.ByteString getWriteStreamsBytes(int index); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponse.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponse.java new file mode 100644 index 0000000000..c9893c4634 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponse.java @@ -0,0 +1,1362 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Response message for `BatchCommitWriteStreams`.
+ * 
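+ *
+ * <p>A minimal, illustrative sketch of interpreting this response; the
+ * {@code bigQueryWriteClient} and {@code request} variables are assumptions,
+ * not part of this generated file:
+ *
+ * <pre>{@code
+ * BatchCommitWriteStreamsResponse response =
+ *     bigQueryWriteClient.batchCommitWriteStreams(request);
+ * if (response.hasCommitTime()) {
+ *   // Success: every stream in the request was committed atomically.
+ * } else {
+ *   // Failure: nothing was committed; inspect the per-stream errors.
+ *   for (StorageError error : response.getStreamErrorsList()) {
+ *     System.out.println(error.getErrorMessage());
+ *   }
+ * }
+ * }</pre>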
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse} + */ +public final class BatchCommitWriteStreamsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) + BatchCommitWriteStreamsResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use BatchCommitWriteStreamsResponse.newBuilder() to construct. + private BatchCommitWriteStreamsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCommitWriteStreamsResponse() { + streamErrors_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCommitWriteStreamsResponse(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private BatchCommitWriteStreamsResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (commitTime_ != null) { + subBuilder = commitTime_.toBuilder(); + } + commitTime_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(commitTime_); + commitTime_ = subBuilder.buildPartial(); + } + + break; + } + case 18: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + streamErrors_ = + new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + streamErrors_.add( + input.readMessage( + com.google.cloud.bigquery.storage.v1.StorageError.parser(), + extensionRegistry)); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + streamErrors_ = java.util.Collections.unmodifiableList(streamErrors_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse.class, + 
com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse.Builder.class); + } + + public static final int COMMIT_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp commitTime_; + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field will only exist when there are no stream errors.
+   * **Note**: if this field is not set, it means the commit was not successful.
+   * 
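+   *
+   * <p>A hedged sketch of converting the timestamp for display; the
+   * {@code java.time} conversion and the {@code response} variable are the
+   * caller's code, not generated API:
+   *
+   * <pre>{@code
+   * Timestamp t = response.getCommitTime();
+   * Instant committedAt = Instant.ofEpochSecond(t.getSeconds(), t.getNanos());
+   * }</pre>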
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + @java.lang.Override + public boolean hasCommitTime() { + return commitTime_ != null; + } + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field will only exist when there are no stream errors.
+   * **Note**: if this field is not set, it means the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCommitTime() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field will only exist when there are no stream errors.
+   * **Note**: if this field is not set, it means the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + return getCommitTime(); + } + + public static final int STREAM_ERRORS_FIELD_NUMBER = 2; + private java.util.List streamErrors_; + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with an error will
+   * appear in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, certain streams had errors and, due to the atomicity
+   * guarantee, no streams were committed.
+   * 
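+   *
+   * <p>Illustrative sketch of mapping failures back to stream names via
+   * {@code StorageError.getEntity()}; the {@code response} variable is an
+   * assumption:
+   *
+   * <pre>{@code
+   * for (StorageError error : response.getStreamErrorsList()) {
+   *   System.out.printf("stream %s failed: %s%n",
+   *       error.getEntity(), error.getErrorMessage());
+   * }
+   * }</pre>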
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + @java.lang.Override + public java.util.List getStreamErrorsList() { + return streamErrors_; + } + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with an error will
+   * appear in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, certain streams had errors and, due to the atomicity
+   * guarantee, no streams were committed.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + @java.lang.Override + public java.util.List + getStreamErrorsOrBuilderList() { + return streamErrors_; + } + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with an error will
+   * appear in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, certain streams had errors and, due to the atomicity
+   * guarantee, no streams were committed.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + @java.lang.Override + public int getStreamErrorsCount() { + return streamErrors_.size(); + } + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with an error will
+   * appear in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, certain streams had errors and, due to the atomicity
+   * guarantee, no streams were committed.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError getStreamErrors(int index) { + return streamErrors_.get(index); + } + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with an error will
+   * appear in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, certain streams had errors and, due to the atomicity
+   * guarantee, no streams were committed.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder getStreamErrorsOrBuilder( + int index) { + return streamErrors_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (commitTime_ != null) { + output.writeMessage(1, getCommitTime()); + } + for (int i = 0; i < streamErrors_.size(); i++) { + output.writeMessage(2, streamErrors_.get(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (commitTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCommitTime()); + } + for (int i = 0; i < streamErrors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, streamErrors_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse other = + (com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) obj; + + if (hasCommitTime() != other.hasCommitTime()) return false; + if (hasCommitTime()) { + if (!getCommitTime().equals(other.getCommitTime())) return false; + } + if (!getStreamErrorsList().equals(other.getStreamErrorsList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasCommitTime()) { + hash = (37 * hash) + COMMIT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCommitTime().hashCode(); + } + if (getStreamErrorsCount() > 0) { + hash = (37 * hash) + STREAM_ERRORS_FIELD_NUMBER; + hash = (53 * hash) + getStreamErrorsList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Response message for `BatchCommitWriteStreams`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse.class, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getStreamErrorsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (commitTimeBuilder_ == null) { + commitTime_ = null; + } else { + commitTime_ = null; + commitTimeBuilder_ = null; + } + if (streamErrorsBuilder_ == null) { + streamErrors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + streamErrorsBuilder_.clear(); + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse build() { + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse result = + new com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse(this); + int from_bitField0_ = bitField0_; + if (commitTimeBuilder_ == null) { + result.commitTime_ = commitTime_; + } else { + result.commitTime_ = commitTimeBuilder_.build(); + } + if (streamErrorsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + streamErrors_ = java.util.Collections.unmodifiableList(streamErrors_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.streamErrors_ = streamErrors_; + } else { + 
result.streamErrors_ = streamErrorsBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + .getDefaultInstance()) return this; + if (other.hasCommitTime()) { + mergeCommitTime(other.getCommitTime()); + } + if (streamErrorsBuilder_ == null) { + if (!other.streamErrors_.isEmpty()) { + if (streamErrors_.isEmpty()) { + streamErrors_ = other.streamErrors_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureStreamErrorsIsMutable(); + streamErrors_.addAll(other.streamErrors_); + } + onChanged(); + } + } else { + if (!other.streamErrors_.isEmpty()) { + if (streamErrorsBuilder_.isEmpty()) { + streamErrorsBuilder_.dispose(); + streamErrorsBuilder_ = null; + streamErrors_ = other.streamErrors_; + bitField0_ = (bitField0_ & ~0x00000001); + streamErrorsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getStreamErrorsFieldBuilder() + : null; + } else { + streamErrorsBuilder_.addAllMessages(other.streamErrors_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp commitTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimeBuilder_; + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note**: if this field is not set, it means the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + public boolean hasCommitTime() { + return commitTimeBuilder_ != null || commitTime_ != null; + } + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note**: if this field is not set, it means the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + public com.google.protobuf.Timestamp getCommitTime() { + if (commitTimeBuilder_ == null) { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } else { + return commitTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note**: if this field is not set, it means the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder setCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTime_ = value; + onChanged(); + } else { + commitTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note**: if this field is not set, it means the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder setCommitTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimeBuilder_ == null) { + commitTime_ = builderForValue.build(); + onChanged(); + } else { + commitTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note**: if this field is not set, it means the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder mergeCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (commitTime_ != null) { + commitTime_ = + com.google.protobuf.Timestamp.newBuilder(commitTime_).mergeFrom(value).buildPartial(); + } else { + commitTime_ = value; + } + onChanged(); + } else { + commitTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note**: if this field is not set, it means the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder clearCommitTime() { + if (commitTimeBuilder_ == null) { + commitTime_ = null; + onChanged(); + } else { + commitTime_ = null; + commitTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note**: if this field is not set, it means the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public com.google.protobuf.Timestamp.Builder getCommitTimeBuilder() { + + onChanged(); + return getCommitTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note**: if this field is not set, it means the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + if (commitTimeBuilder_ != null) { + return commitTimeBuilder_.getMessageOrBuilder(); + } else { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } + } + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note**: if this field is not set, it means the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCommitTimeFieldBuilder() { + if (commitTimeBuilder_ == null) { + commitTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTime(), getParentForChildren(), isClean()); + commitTime_ = null; + } + return commitTimeBuilder_; + } + + private java.util.List streamErrors_ = + java.util.Collections.emptyList(); + + private void ensureStreamErrorsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + streamErrors_ = + new java.util.ArrayList( + streamErrors_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.StorageError, + com.google.cloud.bigquery.storage.v1.StorageError.Builder, + com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder> + streamErrorsBuilder_; + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public java.util.List getStreamErrorsList() { + if (streamErrorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(streamErrors_); + } else { + return streamErrorsBuilder_.getMessageList(); + } + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public int getStreamErrorsCount() { + if (streamErrorsBuilder_ == null) { + return streamErrors_.size(); + } else { + return streamErrorsBuilder_.getCount(); + } + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1.StorageError getStreamErrors(int index) { + if (streamErrorsBuilder_ == null) { + return streamErrors_.get(index); + } else { + return streamErrorsBuilder_.getMessage(index); + } + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder setStreamErrors( + int index, com.google.cloud.bigquery.storage.v1.StorageError value) { + if (streamErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamErrorsIsMutable(); + streamErrors_.set(index, value); + onChanged(); + } else { + streamErrorsBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder setStreamErrors( + int index, com.google.cloud.bigquery.storage.v1.StorageError.Builder builderForValue) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.set(index, builderForValue.build()); + onChanged(); + } else { + streamErrorsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder addStreamErrors(com.google.cloud.bigquery.storage.v1.StorageError value) { + if (streamErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamErrorsIsMutable(); + streamErrors_.add(value); + onChanged(); + } else { + streamErrorsBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder addStreamErrors( + int index, com.google.cloud.bigquery.storage.v1.StorageError value) { + if (streamErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamErrorsIsMutable(); + streamErrors_.add(index, value); + onChanged(); + } else { + streamErrorsBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder addStreamErrors( + com.google.cloud.bigquery.storage.v1.StorageError.Builder builderForValue) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.add(builderForValue.build()); + onChanged(); + } else { + streamErrorsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder addStreamErrors( + int index, com.google.cloud.bigquery.storage.v1.StorageError.Builder builderForValue) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.add(index, builderForValue.build()); + onChanged(); + } else { + streamErrorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder addAllStreamErrors( + java.lang.Iterable values) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, streamErrors_); + onChanged(); + } else { + streamErrorsBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder clearStreamErrors() { + if (streamErrorsBuilder_ == null) { + streamErrors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + streamErrorsBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder removeStreamErrors(int index) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.remove(index); + onChanged(); + } else { + streamErrorsBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1.StorageError.Builder getStreamErrorsBuilder( + int index) { + return getStreamErrorsFieldBuilder().getBuilder(index); + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder getStreamErrorsOrBuilder( + int index) { + if (streamErrorsBuilder_ == null) { + return streamErrors_.get(index); + } else { + return streamErrorsBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public java.util.List + getStreamErrorsOrBuilderList() { + if (streamErrorsBuilder_ != null) { + return streamErrorsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(streamErrors_); + } + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1.StorageError.Builder addStreamErrorsBuilder() { + return getStreamErrorsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1.StorageError.getDefaultInstance()); + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1.StorageError.Builder addStreamErrorsBuilder( + int index) { + return getStreamErrorsFieldBuilder() + .addBuilder( + index, com.google.cloud.bigquery.storage.v1.StorageError.getDefaultInstance()); + } + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with an error will
+     * appear in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, certain streams had errors and, due to the atomicity
+     * guarantee, no streams were committed.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public java.util.List + getStreamErrorsBuilderList() { + return getStreamErrorsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.StorageError, + com.google.cloud.bigquery.storage.v1.StorageError.Builder, + com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder> + getStreamErrorsFieldBuilder() { + if (streamErrorsBuilder_ == null) { + streamErrorsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.StorageError, + com.google.cloud.bigquery.storage.v1.StorageError.Builder, + com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder>( + streamErrors_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + streamErrors_ = null; + } + return streamErrorsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) + private static final com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCommitWriteStreamsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BatchCommitWriteStreamsResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponseOrBuilder.java new file mode 100644 index 0000000000..ecb2e46fc8 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponseOrBuilder.java @@ -0,0 +1,138 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface BatchCommitWriteStreamsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field will only exist when there are no stream errors.
+   * **Note**: if this field is not set, it means the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + boolean hasCommitTime(); + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field will only exist when there are no stream errors.
+   * **Note**: if this field is not set, it means the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + com.google.protobuf.Timestamp getCommitTime(); + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field will only exist when there are no stream errors.
+   * **Note**: if this field is not set, it means the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder(); + + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with an error will
+   * appear in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, certain streams had errors and, due to the atomicity
+   * guarantee, no streams were committed.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + java.util.List getStreamErrorsList(); + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with an error will
+   * appear in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, certain streams had errors and, due to the atomicity
+   * guarantee, no streams were committed.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + com.google.cloud.bigquery.storage.v1.StorageError getStreamErrors(int index); + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with an error will
+   * appear in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, certain streams had errors and, due to the atomicity
+   * guarantee, no streams were committed.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + int getStreamErrorsCount(); + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with an error will
+   * appear in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, certain streams had errors and, due to the atomicity
+   * guarantee, no streams were committed.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + java.util.List + getStreamErrorsOrBuilderList(); + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with an error will
+   * appear in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, certain streams had errors and, due to the atomicity
+   * guarantee, no streams were committed.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder getStreamErrorsOrBuilder(int index); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequest.java new file mode 100644 index 0000000000..aa589b463f --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequest.java @@ -0,0 +1,964 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for `CreateWriteStream`.
+ * 
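+ *
+ * <p>A minimal, illustrative sketch of building this request; the
+ * {@code bigQueryWriteClient} variable and the resource identifiers are
+ * assumptions, not part of this generated file:
+ *
+ * <pre>{@code
+ * CreateWriteStreamRequest request =
+ *     CreateWriteStreamRequest.newBuilder()
+ *         .setParent(TableName.of("my-project", "my_dataset", "my_table").toString())
+ *         .setWriteStream(
+ *             WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build())
+ *         .build();
+ * WriteStream stream = bigQueryWriteClient.createWriteStream(request);
+ * }</pre>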
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.CreateWriteStreamRequest} + */ +public final class CreateWriteStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) + CreateWriteStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use CreateWriteStreamRequest.newBuilder() to construct. + private CreateWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CreateWriteStreamRequest() { + parent_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CreateWriteStreamRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private CreateWriteStreamRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + parent_ = s; + break; + } + case 18: + { + com.google.cloud.bigquery.storage.v1.WriteStream.Builder subBuilder = null; + if (writeStream_ != null) { + subBuilder = writeStream_.toBuilder(); + } + writeStream_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1.WriteStream.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(writeStream_); + writeStream_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + private volatile java.lang.Object parent_; + /** + * + * + *
+   * Required. Reference to the table to which the stream belongs, in the format
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
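+   *
+   * <p>For example, a parent built with the generated helper (illustrative
+   * identifiers): {@code TableName.of("my-project", "my_dataset", "my_table").toString()}.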
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + * + * + *
+   * Required. Reference to the table to which the stream belongs, in the format
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int WRITE_STREAM_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1.WriteStream writeStream_; + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeStream field is set. + */ + @java.lang.Override + public boolean hasWriteStream() { + return writeStream_ != null; + } + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeStream. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream getWriteStream() { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1.WriteStream.getDefaultInstance() + : writeStream_; + } + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder getWriteStreamOrBuilder() { + return getWriteStream(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getParentBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (writeStream_ != null) { + output.writeMessage(2, getWriteStream()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getParentBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (writeStream_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getWriteStream()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest other = + (com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasWriteStream() != other.hasWriteStream()) return false; + if (hasWriteStream()) { + if (!getWriteStream().equals(other.getWriteStream())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasWriteStream()) { + hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getWriteStream().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request message for `CreateWriteStream`.
+   * 
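+   *
+   * 

A minimal usage sketch (resource identifiers are placeholders): + * + *

{@code
+   * CreateWriteStreamRequest request =
+   *     CreateWriteStreamRequest.newBuilder()
+   *         .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
+   *         .setWriteStream(WriteStream.newBuilder().build())
+   *         .build();
+   * }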
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.CreateWriteStreamRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + parent_ = ""; + + if (writeStreamBuilder_ == null) { + writeStream_ = null; + } else { + writeStream_ = null; + writeStreamBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest build() { + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest result = + new com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest(this); + result.parent_ = parent_; + if (writeStreamBuilder_ == null) { + result.writeStream_ = writeStream_; + } else { + result.writeStream_ = writeStreamBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + 
@java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + onChanged(); + } + if (other.hasWriteStream()) { + mergeWriteStream(other.getWriteStream()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object parent_ = ""; + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + parent_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + + parent_ = getDefaultInstance().getParent(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + parent_ = value; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1.WriteStream writeStream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.WriteStream, + com.google.cloud.bigquery.storage.v1.WriteStream.Builder, + com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder> + writeStreamBuilder_; + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeStream field is set. + */ + public boolean hasWriteStream() { + return writeStreamBuilder_ != null || writeStream_ != null; + } + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeStream. + */ + public com.google.cloud.bigquery.storage.v1.WriteStream getWriteStream() { + if (writeStreamBuilder_ == null) { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1.WriteStream.getDefaultInstance() + : writeStream_; + } else { + return writeStreamBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setWriteStream(com.google.cloud.bigquery.storage.v1.WriteStream value) { + if (writeStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writeStream_ = value; + onChanged(); + } else { + writeStreamBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setWriteStream( + com.google.cloud.bigquery.storage.v1.WriteStream.Builder builderForValue) { + if (writeStreamBuilder_ == null) { + writeStream_ = builderForValue.build(); + onChanged(); + } else { + writeStreamBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeWriteStream(com.google.cloud.bigquery.storage.v1.WriteStream value) { + if (writeStreamBuilder_ == null) { + if (writeStream_ != null) { + writeStream_ = + com.google.cloud.bigquery.storage.v1.WriteStream.newBuilder(writeStream_) + .mergeFrom(value) + .buildPartial(); + } else { + writeStream_ = value; + } + onChanged(); + } else { + writeStreamBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearWriteStream() { + if (writeStreamBuilder_ == null) { + writeStream_ = null; + onChanged(); + } else { + writeStream_ = null; + writeStreamBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1.WriteStream.Builder getWriteStreamBuilder() { + + onChanged(); + return getWriteStreamFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder getWriteStreamOrBuilder() { + if (writeStreamBuilder_ != null) { + return writeStreamBuilder_.getMessageOrBuilder(); + } else { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1.WriteStream.getDefaultInstance() + : writeStream_; + } + } + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.WriteStream, + com.google.cloud.bigquery.storage.v1.WriteStream.Builder, + com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder> + getWriteStreamFieldBuilder() { + if (writeStreamBuilder_ == null) { + writeStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.WriteStream, + com.google.cloud.bigquery.storage.v1.WriteStream.Builder, + com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder>( + getWriteStream(), getParentForChildren(), isClean()); + writeStream_ = null; + } + return writeStreamBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) + private static final com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateWriteStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CreateWriteStreamRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequestOrBuilder.java new file mode 100644 index 0000000000..6effd88b9c --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequestOrBuilder.java @@ -0,0 +1,97 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface CreateWriteStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which the stream belongs, in the format
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + /** + * + * + *
+   * Required. Reference to the table to which the stream belongs, in the format
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeStream field is set. + */ + boolean hasWriteStream(); + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeStream. + */ + com.google.cloud.bigquery.storage.v1.WriteStream getWriteStream(); + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder getWriteStreamOrBuilder(); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequest.java new file mode 100644 index 0000000000..de991cc61b --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequest.java @@ -0,0 +1,665 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for invoking `FinalizeWriteStream`.
+ * 
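+ *
+ * 

A minimal usage sketch (the stream path below is a placeholder): + * + *

{@code
+ * FinalizeWriteStreamRequest request =
+ *     FinalizeWriteStreamRequest.newBuilder()
+ *         .setName(
+ *             "projects/my-project/datasets/my_dataset/tables/my_table/streams/my-stream")
+ *         .build();
+ * }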
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest} + */ +public final class FinalizeWriteStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) + FinalizeWriteStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use FinalizeWriteStreamRequest.newBuilder() to construct. + private FinalizeWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FinalizeWriteStreamRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FinalizeWriteStreamRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private FinalizeWriteStreamRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+   * Required. Name of the stream to finalize, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Required. Name of the stream to finalize, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest other = + (com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + 
} + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request message for invoking `FinalizeWriteStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest build() { + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest result = + new com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest(this); + result.name_ = name_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public 
Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) + private static final com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FinalizeWriteStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FinalizeWriteStreamRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequestOrBuilder.java new file mode 100644 index 0000000000..524f55e500 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequestOrBuilder.java @@ -0,0 +1,56 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface FinalizeWriteStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the stream to finalize, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Required. Name of the stream to finalize, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponse.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponse.java new file mode 100644 index 0000000000..f1abaf6643 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponse.java @@ -0,0 +1,554 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Response message for `FinalizeWriteStream`.
+ * 
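+ *
+ * 

A minimal sketch of consuming the response; {@code response} is assumed + * to have been returned by a {@code FinalizeWriteStream} call: + * + *

{@code
+ * long finalizedRows = response.getRowCount();
+ * }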
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse} + */ +public final class FinalizeWriteStreamResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) + FinalizeWriteStreamResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use FinalizeWriteStreamResponse.newBuilder() to construct. + private FinalizeWriteStreamResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FinalizeWriteStreamResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FinalizeWriteStreamResponse(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private FinalizeWriteStreamResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + rowCount_ = input.readInt64(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.class, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.Builder.class); + } + + public static final int ROW_COUNT_FIELD_NUMBER = 1; + private long rowCount_; + /** + * + * + *
+   * Number of rows in the finalized stream.
+   * 
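+   *
+   * 

A caller can, for example, compare this value with its own count of + * appended rows (sketch; {@code expectedRows} is a hypothetical caller-side + * counter): + * + *

{@code
+   * if (response.getRowCount() != expectedRows) {
+   *   // Some appends did not make it into the stream; investigate before
+   *   // committing the stream's data.
+   * }
+   * }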
+ * + * int64 row_count = 1; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (rowCount_ != 0L) { + output.writeInt64(1, rowCount_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (rowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, rowCount_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse other = + (com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) obj; + + if (getRowCount() != other.getRowCount()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCount()); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse 
parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Response message for `FinalizeWriteStream`.
+   * 
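 + * + *

A minimal sketch (illustrative only, not part of the generated file): a response is + * normally returned by the service, but one can be built by hand to show how the row + * count is read back. + * + *

{@code
   + * FinalizeWriteStreamResponse response =
   + *     FinalizeWriteStreamResponse.newBuilder().setRowCount(42L).build();
   + * long rowCount = response.getRowCount(); // 42
   + * }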
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.class, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + rowCount_ = 0L; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse build() { + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse buildPartial() { + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse result = + new com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse(this); + result.rowCount_ = rowCount_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + 
@java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.getDefaultInstance()) + return this; + if (other.getRowCount() != 0L) { + setRowCount(other.getRowCount()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private long rowCount_; + /** + * + * + *
+     * Number of rows in the finalized stream.
+     * 
+ * + * int64 row_count = 1; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + /** + * + * + *
+     * Number of rows in the finalized stream.
+     * 
+ * + * int64 row_count = 1; + * + * @param value The rowCount to set. + * @return This builder for chaining. + */ + public Builder setRowCount(long value) { + + rowCount_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Number of rows in the finalized stream.
+     * 
+ * + * int64 row_count = 1; + * + * @return This builder for chaining. + */ + public Builder clearRowCount() { + + rowCount_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) + private static final com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse(); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FinalizeWriteStreamResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FinalizeWriteStreamResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponseOrBuilder.java new file mode 100644 index 0000000000..3da951565e --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponseOrBuilder.java @@ -0,0 +1,38 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface FinalizeWriteStreamResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Number of rows in the finalized stream.
+   * 
+ * + * int64 row_count = 1; + * + * @return The rowCount. + */ + long getRowCount(); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequest.java new file mode 100644 index 0000000000..b878dcb467 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequest.java @@ -0,0 +1,931 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for `FlushRows`.
+ * 
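 + * + *

A hedged construction sketch (illustrative; the stream path below is a placeholder in + * the `projects/.../streams/...` resource format, and the offset uses the protobuf + * {@code Int64Value} wrapper): + * + *

{@code
+ * FlushRowsRequest request =
+ *     FlushRowsRequest.newBuilder()
+ *         .setWriteStream(
+ *             "projects/[PROJECT]/datasets/[DATASET]/tables/[TABLE]/streams/[STREAM]")
+ *         .setOffset(com.google.protobuf.Int64Value.of(10L))
+ *         .build();
+ * }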
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FlushRowsRequest} + */ +public final class FlushRowsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.FlushRowsRequest) + FlushRowsRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use FlushRowsRequest.newBuilder() to construct. + private FlushRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FlushRowsRequest() { + writeStream_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FlushRowsRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private FlushRowsRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + writeStream_ = s; + break; + } + case 18: + { + com.google.protobuf.Int64Value.Builder subBuilder = null; + if (offset_ != null) { + subBuilder = offset_.toBuilder(); + } + offset_ = + input.readMessage(com.google.protobuf.Int64Value.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(offset_); + offset_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest.class, + com.google.cloud.bigquery.storage.v1.FlushRowsRequest.Builder.class); + } + + public static final int WRITE_STREAM_FIELD_NUMBER = 1; + private volatile java.lang.Object writeStream_; + /** + * + * + *
+   * Required. The stream that is the target of the flush operation.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + @java.lang.Override + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The stream that is the target of the flush operation.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + @java.lang.Override + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OFFSET_FIELD_NUMBER = 2; + private com.google.protobuf.Int64Value offset_; + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+   * 
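 + * + *

Illustrative semantics (the offset value below is hypothetical): + * + *

{@code
   + * // Requests that rows 0..9 (inclusive) of the stream be flushed.
   + * FlushRowsRequest.Builder b = FlushRowsRequest.newBuilder();
   + * b.setOffset(com.google.protobuf.Int64Value.of(9L));
   + * }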
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + @java.lang.Override + public boolean hasOffset() { + return offset_ != null; + } + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + @java.lang.Override + public com.google.protobuf.Int64Value getOffset() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + @java.lang.Override + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + return getOffset(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getWriteStreamBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, writeStream_); + } + if (offset_ != null) { + output.writeMessage(2, getOffset()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getWriteStreamBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, writeStream_); + } + if (offset_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getOffset()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.FlushRowsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.FlushRowsRequest other = + (com.google.cloud.bigquery.storage.v1.FlushRowsRequest) obj; + + if (!getWriteStream().equals(other.getWriteStream())) return false; + if (hasOffset() != other.hasOffset()) return false; + if (hasOffset()) { + if (!getOffset().equals(other.getOffset())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getWriteStream().hashCode(); + if (hasOffset()) { + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + getOffset().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request message for `FlushRows`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FlushRowsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.FlushRowsRequest) + com.google.cloud.bigquery.storage.v1.FlushRowsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest.class, + com.google.cloud.bigquery.storage.v1.FlushRowsRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.FlushRowsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + writeStream_ = ""; + + if (offsetBuilder_ == null) { + offset_ = null; + } else { + offset_ = null; + offsetBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsRequest getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.FlushRowsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsRequest build() { + com.google.cloud.bigquery.storage.v1.FlushRowsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.FlushRowsRequest result = + new com.google.cloud.bigquery.storage.v1.FlushRowsRequest(this); + result.writeStream_ = writeStream_; + if (offsetBuilder_ == null) { + result.offset_ = offset_; + } else { + result.offset_ = offsetBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.FlushRowsRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.FlushRowsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.FlushRowsRequest other) { + if (other == com.google.cloud.bigquery.storage.v1.FlushRowsRequest.getDefaultInstance()) + return this; + if (!other.getWriteStream().isEmpty()) { + writeStream_ = other.writeStream_; + onChanged(); + } + if (other.hasOffset()) { + mergeOffset(other.getOffset()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.FlushRowsRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1.FlushRowsRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object writeStream_ = ""; + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStream(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + writeStream_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearWriteStream() { + + writeStream_ = getDefaultInstance().getWriteStream(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + writeStream_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.Int64Value offset_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + offsetBuilder_; + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + public boolean hasOffset() { + return offsetBuilder_ != null || offset_ != null; + } + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + public com.google.protobuf.Int64Value getOffset() { + if (offsetBuilder_ == null) { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } else { + return offsetBuilder_.getMessage(); + } + } + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder setOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + offset_ = value; + onChanged(); + } else { + offsetBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder setOffset(com.google.protobuf.Int64Value.Builder builderForValue) { + if (offsetBuilder_ == null) { + offset_ = builderForValue.build(); + onChanged(); + } else { + offsetBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder mergeOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (offset_ != null) { + offset_ = + com.google.protobuf.Int64Value.newBuilder(offset_).mergeFrom(value).buildPartial(); + } else { + offset_ = value; + } + onChanged(); + } else { + offsetBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder clearOffset() { + if (offsetBuilder_ == null) { + offset_ = null; + onChanged(); + } else { + offset_ = null; + offsetBuilder_ = null; + } + + return this; + } + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public com.google.protobuf.Int64Value.Builder getOffsetBuilder() { + + onChanged(); + return getOffsetFieldBuilder().getBuilder(); + } + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + if (offsetBuilder_ != null) { + return offsetBuilder_.getMessageOrBuilder(); + } else { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + } + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + getOffsetFieldBuilder() { + if (offsetBuilder_ == null) { + offsetBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder>( + getOffset(), getParentForChildren(), isClean()); + offset_ = null; + } + return offsetBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.FlushRowsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.FlushRowsRequest) + private static final com.google.cloud.bigquery.storage.v1.FlushRowsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.FlushRowsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FlushRowsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FlushRowsRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequestOrBuilder.java new file mode 100644 index 0000000000..41db84641f --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequestOrBuilder.java @@ -0,0 +1,92 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface FlushRowsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.FlushRowsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The stream that is the target of the flush operation.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + java.lang.String getWriteStream(); + /** + * + * + *
+   * Required. The stream that is the target of the flush operation.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + com.google.protobuf.ByteString getWriteStreamBytes(); + + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + boolean hasOffset(); + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + com.google.protobuf.Int64Value getOffset(); + /** + * + * + *
 + * Ending offset of the flush operation. Rows before this offset (including + * this offset) will be flushed. 
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder(); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponse.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponse.java new file mode 100644 index 0000000000..34086aa5ed --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponse.java @@ -0,0 +1,547 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
 + * Response message for `FlushRows`. 
+ * 
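 + * + *

A hedged usage sketch (assumes a {@code BigQueryWriteClient} and a previously built + * {@code FlushRowsRequest}, as elsewhere in this API): + * + *

{@code
+ * // 'request' is assumed to be a FlushRowsRequest built as shown in its own class docs.
+ * try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
+ *   FlushRowsResponse response = client.flushRows(request);
+ *   long flushedThrough = response.getOffset(); // rows up to and including this offset
+ * }
+ * }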
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FlushRowsResponse} + */ +public final class FlushRowsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.FlushRowsResponse) + FlushRowsResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use FlushRowsResponse.newBuilder() to construct. + private FlushRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FlushRowsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FlushRowsResponse(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private FlushRowsResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + offset_ = input.readInt64(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FlushRowsResponse.class, + com.google.cloud.bigquery.storage.v1.FlushRowsResponse.Builder.class); + } + + public static final int OFFSET_FIELD_NUMBER = 1; + private long offset_; + /** + * + * + *
+   * The rows before this offset (including this offset) are flushed.
+   * 
+ * + * int64 offset = 1; + * + * @return The offset. + */ + @java.lang.Override + public long getOffset() { + return offset_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (offset_ != 0L) { + output.writeInt64(1, offset_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (offset_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, offset_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.FlushRowsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.FlushRowsResponse other = + (com.google.cloud.bigquery.storage.v1.FlushRowsResponse) obj; + + if (getOffset() != other.getOffset()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOffset()); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.FlushRowsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
 + * Response message for `FlushRows`. 
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FlushRowsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.FlushRowsResponse) + com.google.cloud.bigquery.storage.v1.FlushRowsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FlushRowsResponse.class, + com.google.cloud.bigquery.storage.v1.FlushRowsResponse.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.FlushRowsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + offset_ = 0L; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsResponse getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.FlushRowsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsResponse build() { + com.google.cloud.bigquery.storage.v1.FlushRowsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1.FlushRowsResponse result = + new com.google.cloud.bigquery.storage.v1.FlushRowsResponse(this); + result.offset_ = offset_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return 
super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.FlushRowsResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.FlushRowsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.FlushRowsResponse other) { + if (other == com.google.cloud.bigquery.storage.v1.FlushRowsResponse.getDefaultInstance()) + return this; + if (other.getOffset() != 0L) { + setOffset(other.getOffset()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.FlushRowsResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1.FlushRowsResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private long offset_; + /** + * + * + *
+     * The rows before this offset (including this offset) are flushed.
+     * 
+ * + * int64 offset = 1; + * + * @return The offset. + */ + @java.lang.Override + public long getOffset() { + return offset_; + } + /** + * + * + *
+     * The rows before this offset (including this offset) are flushed.
+     * 
+ * + * int64 offset = 1; + * + * @param value The offset to set. + * @return This builder for chaining. + */ + public Builder setOffset(long value) { + + offset_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * The rows before this offset (including this offset) are flushed.
+     * 
+ * + * int64 offset = 1; + * + * @return This builder for chaining. + */ + public Builder clearOffset() { + + offset_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.FlushRowsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.FlushRowsResponse) + private static final com.google.cloud.bigquery.storage.v1.FlushRowsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.FlushRowsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FlushRowsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FlushRowsResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponseOrBuilder.java new file mode 100644 index 0000000000..9608099deb --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponseOrBuilder.java @@ -0,0 +1,38 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface FlushRowsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.FlushRowsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The rows up to and including this offset are flushed.
+   * 
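+   *
+   * <p>A minimal sketch of reading this field after a flush. The request shape below (a
+   * {@code FlushRowsRequest} carrying a wrapped offset) is assumed from the same v1 API
+   * surface, and the stream name and offset value are placeholders:
+   *
+   * <pre>{@code
+   * try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
+   *   FlushRowsRequest request =
+   *       FlushRowsRequest.newBuilder()
+   *           .setWriteStream("projects/[PROJECT]/datasets/[DATASET]/tables/[TABLE]/streams/[STREAM]")
+   *           .setOffset(com.google.protobuf.Int64Value.of(100L))
+   *           .build();
+   *   FlushRowsResponse response = client.flushRows(request);
+   *   long flushed = response.getOffset(); // rows up to and including this offset are flushed
+   * }
+   * }</pre>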
+ * + * int64 offset = 1; + * + * @return The offset. + */ + long getOffset(); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequest.java new file mode 100644 index 0000000000..d73891e8e0 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequest.java @@ -0,0 +1,658 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for `GetWriteStream`.
+ * 
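+ *
+ * <p>A minimal usage sketch, following the sample-code conventions of this client; the
+ * resource-name parts are placeholders:
+ *
+ * <pre>{@code
+ * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+ *   GetWriteStreamRequest request =
+ *       GetWriteStreamRequest.newBuilder()
+ *           .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+ *           .build();
+ *   WriteStream writeStream = bigQueryWriteClient.getWriteStream(request);
+ * }
+ * }</pre>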
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.GetWriteStreamRequest} + */ +public final class GetWriteStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.GetWriteStreamRequest) + GetWriteStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use GetWriteStreamRequest.newBuilder() to construct. + private GetWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private GetWriteStreamRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new GetWriteStreamRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private GetWriteStreamRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+   * Required. Name of the stream to get, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Required. Name of the stream to get, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest other = + (com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request message for `GetWriteStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.GetWriteStreamRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.GetWriteStreamRequest) + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest build() { + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest result = + new com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest(this); + result.name_ = name_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, 
java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest other) { + if (other == com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.GetWriteStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.GetWriteStreamRequest) + private static final com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetWriteStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetWriteStreamRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequestOrBuilder.java new file mode 100644 index 0000000000..fbe97e3a0e --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequestOrBuilder.java @@ -0,0 +1,56 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface GetWriteStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.GetWriteStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the stream to get, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Required. Name of the stream to get, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoBufProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoBufProto.java new file mode 100644 index 0000000000..6179ae6b21 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoBufProto.java @@ -0,0 +1,85 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/protobuf.proto + +package com.google.cloud.bigquery.storage.v1; + +public final class ProtoBufProto { + private ProtoBufProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ProtoRows_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_ProtoRows_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n/google/cloud/bigquery/storage/v1/proto" + + "buf.proto\022 google.cloud.bigquery.storage" + + ".v1\032 google/protobuf/descriptor.proto\"I\n" + + "\013ProtoSchema\022:\n\020proto_descriptor\030\001 \001(\0132 " + + ".google.protobuf.DescriptorProto\"$\n\tProt" + + "oRows\022\027\n\017serialized_rows\030\001 \003(\014B\306\001\n$com.g" + + "oogle.cloud.bigquery.storage.v1B\rProtoBu" + + "fProtoP\001ZGgoogle.golang.org/genproto/goo" + + "gleapis/cloud/bigquery/storage/v1;storag" + + "e\252\002 Google.Cloud.BigQuery.Storage.V1\312\002 G" + + "oogle\\Cloud\\BigQuery\\Storage\\V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.DescriptorProtos.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + 
internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_descriptor, + new java.lang.String[] { + "ProtoDescriptor", + }); + internal_static_google_cloud_bigquery_storage_v1_ProtoRows_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1_ProtoRows_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ProtoRows_descriptor, + new java.lang.String[] { + "SerializedRows", + }); + com.google.protobuf.DescriptorProtos.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRows.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRows.java new file mode 100644 index 0000000000..1e02d77a3c --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRows.java @@ -0,0 +1,691 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/protobuf.proto + +package com.google.cloud.bigquery.storage.v1; + +/** Protobuf type {@code google.cloud.bigquery.storage.v1.ProtoRows} */ +public final class ProtoRows extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ProtoRows) + ProtoRowsOrBuilder { + private static final long serialVersionUID = 0L; + // Use ProtoRows.newBuilder() to construct. 
+ private ProtoRows(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ProtoRows() { + serializedRows_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ProtoRows(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ProtoRows( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + serializedRows_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + serializedRows_.add(input.readBytes()); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + serializedRows_ = java.util.Collections.unmodifiableList(serializedRows_); // C + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ProtoRows.class, + com.google.cloud.bigquery.storage.v1.ProtoRows.Builder.class); + } + + public static final int SERIALIZED_ROWS_FIELD_NUMBER = 1; + private java.util.List serializedRows_; + /** + * + * + *
+   * A sequence of rows, each serialized as a Protocol Buffer message.
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
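+   *
+   * <p>A minimal sketch of populating this field; {@code MyRowMessage} is a hypothetical
+   * generated message type whose fields match the destination table schema:
+   *
+   * <pre>{@code
+   * MyRowMessage row = MyRowMessage.newBuilder().build();
+   * ProtoRows rows =
+   *     ProtoRows.newBuilder()
+   *         .addSerializedRows(row.toByteString()) // one ByteString per serialized row
+   *         .build();
+   * }</pre>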
+ * + * repeated bytes serialized_rows = 1; + * + * @return A list containing the serializedRows. + */ + @java.lang.Override + public java.util.List getSerializedRowsList() { + return serializedRows_; + } + /** + * + * + *
+   * A sequence of rows, each serialized as a Protocol Buffer message.
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return The count of serializedRows. + */ + public int getSerializedRowsCount() { + return serializedRows_.size(); + } + /** + * + * + *
+   * A sequence of rows, each serialized as a Protocol Buffer message.
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index of the element to return. + * @return The serializedRows at the given index. + */ + public com.google.protobuf.ByteString getSerializedRows(int index) { + return serializedRows_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < serializedRows_.size(); i++) { + output.writeBytes(1, serializedRows_.get(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < serializedRows_.size(); i++) { + dataSize += + com.google.protobuf.CodedOutputStream.computeBytesSizeNoTag(serializedRows_.get(i)); + } + size += dataSize; + size += 1 * getSerializedRowsList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ProtoRows)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ProtoRows other = + (com.google.cloud.bigquery.storage.v1.ProtoRows) obj; + + if (!getSerializedRowsList().equals(other.getSerializedRowsList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getSerializedRowsCount() > 0) { + hash = (37 * hash) + SERIALIZED_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getSerializedRowsList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
+ throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.ProtoRows prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** Protobuf type {@code google.cloud.bigquery.storage.v1.ProtoRows} */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ProtoRows) + com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ProtoRows.class, + com.google.cloud.bigquery.storage.v1.ProtoRows.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.ProtoRows.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + serializedRows_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoRows_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoRows getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ProtoRows.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoRows build() { + com.google.cloud.bigquery.storage.v1.ProtoRows result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoRows buildPartial() { + com.google.cloud.bigquery.storage.v1.ProtoRows result = + new com.google.cloud.bigquery.storage.v1.ProtoRows(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) != 0)) { + serializedRows_ = java.util.Collections.unmodifiableList(serializedRows_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.serializedRows_ = serializedRows_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return 
super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ProtoRows) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.ProtoRows) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ProtoRows other) { + if (other == com.google.cloud.bigquery.storage.v1.ProtoRows.getDefaultInstance()) return this; + if (!other.serializedRows_.isEmpty()) { + if (serializedRows_.isEmpty()) { + serializedRows_ = other.serializedRows_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureSerializedRowsIsMutable(); + serializedRows_.addAll(other.serializedRows_); + } + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.ProtoRows parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.bigquery.storage.v1.ProtoRows) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.util.List serializedRows_ = + java.util.Collections.emptyList(); + + private void ensureSerializedRowsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + serializedRows_ = new java.util.ArrayList(serializedRows_); + bitField0_ |= 0x00000001; + } + } + /** + * + * + *
+     * A sequence of rows, each serialized as a Protocol Buffer message.
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return A list containing the serializedRows. + */ + public java.util.List getSerializedRowsList() { + return ((bitField0_ & 0x00000001) != 0) + ? java.util.Collections.unmodifiableList(serializedRows_) + : serializedRows_; + } + /** + * + * + *
+     * A sequence of rows, each serialized as a Protocol Buffer message.
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return The count of serializedRows. + */ + public int getSerializedRowsCount() { + return serializedRows_.size(); + } + /** + * + * + *
+     * A sequence of rows, each serialized as a Protocol Buffer message.
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index of the element to return. + * @return The serializedRows at the given index. + */ + public com.google.protobuf.ByteString getSerializedRows(int index) { + return serializedRows_.get(index); + } + /** + * + * + *
+     * A sequence of rows, each serialized as a Protocol Buffer message.
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index to set the value at. + * @param value The serializedRows to set. + * @return This builder for chaining. + */ + public Builder setSerializedRows(int index, com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSerializedRowsIsMutable(); + serializedRows_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * A sequence of rows, each serialized as a Protocol Buffer message.
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param value The serializedRows to add. + * @return This builder for chaining. + */ + public Builder addSerializedRows(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSerializedRowsIsMutable(); + serializedRows_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * A sequence of rows, each serialized as a Protocol Buffer message.
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param values The serializedRows to add. + * @return This builder for chaining. + */ + public Builder addAllSerializedRows( + java.lang.Iterable values) { + ensureSerializedRowsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, serializedRows_); + onChanged(); + return this; + } + /** + * + * + *
+     * A sequence of rows, each serialized as a Protocol Buffer message.
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedRows() { + serializedRows_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ProtoRows) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ProtoRows) + private static final com.google.cloud.bigquery.storage.v1.ProtoRows DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ProtoRows(); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProtoRows parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ProtoRows(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoRows getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRowsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRowsOrBuilder.java new file mode 100644 index 0000000000..478554fa14 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRowsOrBuilder.java @@ -0,0 +1,69 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/protobuf.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface ProtoRowsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ProtoRows) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * A sequence of rows, each serialized as a Protocol Buffer message.
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return A list containing the serializedRows. + */ + java.util.List getSerializedRowsList(); + /** + * + * + *
+   * A sequence of rows, each serialized as a Protocol Buffer message.
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return The count of serializedRows. + */ + int getSerializedRowsCount(); + /** + * + * + *
+   * A sequence of rows, each serialized as a Protocol Buffer message.
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index of the element to return. + * @return The serializedRows at the given index. + */ + com.google.protobuf.ByteString getSerializedRows(int index); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchema.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchema.java new file mode 100644 index 0000000000..eb011173bb --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchema.java @@ -0,0 +1,833 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/protobuf.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * ProtoSchema describes the schema of the serialized protocol buffer data rows.
+ * 
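+ *
+ * <p>A minimal sketch of building a schema from a compiled message type; {@code MyRowMessage}
+ * is a hypothetical generated class. Note that {@code toProto()} yields a descriptor that is
+ * self-contained only if every referenced type is nested within the message itself:
+ *
+ * <pre>{@code
+ * ProtoSchema schema =
+ *     ProtoSchema.newBuilder()
+ *         .setProtoDescriptor(MyRowMessage.getDescriptor().toProto())
+ *         .build();
+ * }</pre>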
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ProtoSchema} + */ +public final class ProtoSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ProtoSchema) + ProtoSchemaOrBuilder { + private static final long serialVersionUID = 0L; + // Use ProtoSchema.newBuilder() to construct. + private ProtoSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ProtoSchema() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ProtoSchema(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ProtoSchema( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder subBuilder = null; + if (protoDescriptor_ != null) { + subBuilder = protoDescriptor_.toBuilder(); + } + protoDescriptor_ = + input.readMessage( + com.google.protobuf.DescriptorProtos.DescriptorProto.PARSER, + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(protoDescriptor_); + protoDescriptor_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ProtoSchema.class, + com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder.class); + } + + public static final int PROTO_DESCRIPTOR_FIELD_NUMBER = 1; + private com.google.protobuf.DescriptorProtos.DescriptorProto protoDescriptor_; + /** + * + * + *
+   * Descriptor for the input message. The provided descriptor must be
+   * self-contained, such that data rows sent can be fully decoded using only
+   * the single descriptor. For data rows that are compositions of multiple
+   * independent messages, this means the descriptor may need to be transformed
+   * to use only nested types:
+   * https://developers.google.com/protocol-buffers/docs/proto#nested
+   * For additional information on how proto types and values map onto
+   * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return Whether the protoDescriptor field is set. + */ + @java.lang.Override + public boolean hasProtoDescriptor() { + return protoDescriptor_ != null; + } + /** + * + * + *
+   * Descriptor for input message.  The provided descriptor must be
+   * self-contained, such that data rows sent can be fully decoded using only
+   * the single descriptor.  For data rows that are compositions of multiple
+   * independent messages, this means the descriptor may need to be transformed
+   * to use only nested types:
+   * https://developers.google.com/protocol-buffers/docs/proto#nested
+   * For additional information about how proto types and values map onto
+   * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return The protoDescriptor. + */ + @java.lang.Override + public com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor() { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } + /** + * + * + *
+   * Descriptor for input message.  The provided descriptor must be
+   * self-contained, such that data rows sent can be fully decoded using only
+   * the single descriptor.  For data rows that are compositions of multiple
+   * independent messages, this means the descriptor may need to be transformed
+   * to use only nested types:
+   * https://developers.google.com/protocol-buffers/docs/proto#nested
+   * For additional information about how proto types and values map onto
+   * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + @java.lang.Override + public com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder + getProtoDescriptorOrBuilder() { + return getProtoDescriptor(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasProtoDescriptor()) { + if (!getProtoDescriptor().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (protoDescriptor_ != null) { + output.writeMessage(1, getProtoDescriptor()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (protoDescriptor_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getProtoDescriptor()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ProtoSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ProtoSchema other = + (com.google.cloud.bigquery.storage.v1.ProtoSchema) obj; + + if (hasProtoDescriptor() != other.hasProtoDescriptor()) return false; + if (hasProtoDescriptor()) { + if (!getProtoDescriptor().equals(other.getProtoDescriptor())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasProtoDescriptor()) { + hash = (37 * hash) + PROTO_DESCRIPTOR_FIELD_NUMBER; + hash = (53 * hash) + getProtoDescriptor().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.ProtoSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * ProtoSchema describes the schema of the serialized protocol buffer data rows.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ProtoSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ProtoSchema) + com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ProtoSchema.class, + com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.ProtoSchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (protoDescriptorBuilder_ == null) { + protoDescriptor_ = null; + } else { + protoDescriptor_ = null; + protoDescriptorBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ProtoSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoSchema build() { + com.google.cloud.bigquery.storage.v1.ProtoSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoSchema buildPartial() { + com.google.cloud.bigquery.storage.v1.ProtoSchema result = + new com.google.cloud.bigquery.storage.v1.ProtoSchema(this); + if (protoDescriptorBuilder_ == null) { + result.protoDescriptor_ = protoDescriptor_; + } else { + result.protoDescriptor_ = protoDescriptorBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + 
@java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ProtoSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.ProtoSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ProtoSchema other) { + if (other == com.google.cloud.bigquery.storage.v1.ProtoSchema.getDefaultInstance()) + return this; + if (other.hasProtoDescriptor()) { + mergeProtoDescriptor(other.getProtoDescriptor()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (hasProtoDescriptor()) { + if (!getProtoDescriptor().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.ProtoSchema parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.bigquery.storage.v1.ProtoSchema) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.protobuf.DescriptorProtos.DescriptorProto protoDescriptor_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.DescriptorProtos.DescriptorProto, + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder> + protoDescriptorBuilder_; + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to use only nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     * For additional information about how proto types and values map onto
+     * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return Whether the protoDescriptor field is set. + */ + public boolean hasProtoDescriptor() { + return protoDescriptorBuilder_ != null || protoDescriptor_ != null; + } + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to use only nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     * For additional information about how proto types and values map onto
+     * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return The protoDescriptor. + */ + public com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor() { + if (protoDescriptorBuilder_ == null) { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } else { + return protoDescriptorBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to use only nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     * For additional information about how proto types and values map onto
+     * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder setProtoDescriptor(com.google.protobuf.DescriptorProtos.DescriptorProto value) { + if (protoDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + protoDescriptor_ = value; + onChanged(); + } else { + protoDescriptorBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to use only nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     * For additional information about how proto types and values map onto
+     * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder setProtoDescriptor( + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder builderForValue) { + if (protoDescriptorBuilder_ == null) { + protoDescriptor_ = builderForValue.build(); + onChanged(); + } else { + protoDescriptorBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to use only nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     * For additional information about how proto types and values map onto
+     * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder mergeProtoDescriptor( + com.google.protobuf.DescriptorProtos.DescriptorProto value) { + if (protoDescriptorBuilder_ == null) { + if (protoDescriptor_ != null) { + protoDescriptor_ = + com.google.protobuf.DescriptorProtos.DescriptorProto.newBuilder(protoDescriptor_) + .mergeFrom(value) + .buildPartial(); + } else { + protoDescriptor_ = value; + } + onChanged(); + } else { + protoDescriptorBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to use only nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     * For additional information about how proto types and values map onto
+     * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder clearProtoDescriptor() { + if (protoDescriptorBuilder_ == null) { + protoDescriptor_ = null; + onChanged(); + } else { + protoDescriptor_ = null; + protoDescriptorBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to use only nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     * For additional information about how proto types and values map onto
+     * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public com.google.protobuf.DescriptorProtos.DescriptorProto.Builder + getProtoDescriptorBuilder() { + + onChanged(); + return getProtoDescriptorFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to use only nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     * For additional information about how proto types and values map onto
+     * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder + getProtoDescriptorOrBuilder() { + if (protoDescriptorBuilder_ != null) { + return protoDescriptorBuilder_.getMessageOrBuilder(); + } else { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } + } + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to use only nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     * For additional information about how proto types and values map onto
+     * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.DescriptorProtos.DescriptorProto, + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder> + getProtoDescriptorFieldBuilder() { + if (protoDescriptorBuilder_ == null) { + protoDescriptorBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.DescriptorProtos.DescriptorProto, + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder>( + getProtoDescriptor(), getParentForChildren(), isClean()); + protoDescriptor_ = null; + } + return protoDescriptorBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ProtoSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ProtoSchema) + private static final com.google.cloud.bigquery.storage.v1.ProtoSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ProtoSchema(); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProtoSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ProtoSchema(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaOrBuilder.java new file mode 100644 index 0000000000..1abed6abb7 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaOrBuilder.java @@ -0,0 +1,81 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/protobuf.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface ProtoSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ProtoSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Descriptor for input message.  The provided descriptor must be
+   * self-contained, such that data rows sent can be fully decoded using only
+   * the single descriptor.  For data rows that are compositions of multiple
+   * independent messages, this means the descriptor may need to be transformed
+   * to use only nested types:
+   * https://developers.google.com/protocol-buffers/docs/proto#nested
+   * For additional information about how proto types and values map onto
+   * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return Whether the protoDescriptor field is set. + */ + boolean hasProtoDescriptor(); + /** + * + * + *
+   * Descriptor for input message.  The provided descriptor must be
+   * self-contained, such that data rows sent can be fully decoded using only
+   * the single descriptor.  For data rows that are compositions of multiple
+   * independent messages, this means the descriptor may need to be transformed
+   * to use only nested types:
+   * https://developers.google.com/protocol-buffers/docs/proto#nested
+   * For additional information about how proto types and values map onto
+   * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return The protoDescriptor. + */ + com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor(); + /** + * + * + *
+   * Descriptor for input message.  The provided descriptor must be
+   * self-contained, such that data rows sent can be fully decoded using only
+   * the single descriptor.  For data rows that are compositions of multiple
+   * independent messages, this means the descriptor may need to be transformed
+   * to use only nested types:
+   * https://developers.google.com/protocol-buffers/docs/proto#nested
+   * For additional information about how proto types and values map onto
+   * BigQuery, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder getProtoDescriptorOrBuilder(); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageError.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageError.java new file mode 100644 index 0000000000..802a67f61a --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageError.java @@ -0,0 +1,1261 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Structured custom BigQuery Storage error message. The error can be attached
+ * as error details in the returned rpc Status. In particular, the use of error
+ * codes allows more structured error handling and reduces the need to parse
+ * unstructured error text strings.
+ * 
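+ *
+ * <p>A hedged sketch of recovering a {@code StorageError} from a failed call,
+ * assuming {@code status} holds the {@code com.google.rpc.Status} extracted
+ * from the RPC error (exception handling for {@code unpack} elided):
+ *
+ * <pre>{@code
+ * for (com.google.protobuf.Any detail : status.getDetailsList()) {
+ *   if (detail.is(StorageError.class)) {
+ *     StorageError storageError = detail.unpack(StorageError.class);
+ *     // Branch on storageError.getCode() rather than matching message text.
+ *   }
+ * }
+ * }</pre>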
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.StorageError} + */ +public final class StorageError extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.StorageError) + StorageErrorOrBuilder { + private static final long serialVersionUID = 0L; + // Use StorageError.newBuilder() to construct. + private StorageError(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StorageError() { + code_ = 0; + entity_ = ""; + errorMessage_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StorageError(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private StorageError( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + int rawValue = input.readEnum(); + + code_ = rawValue; + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + + entity_ = s; + break; + } + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + + errorMessage_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StorageError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StorageError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.StorageError.class, + com.google.cloud.bigquery.storage.v1.StorageError.Builder.class); + } + + /** + * + * + *
+   * Error code for `StorageError`.
+   * 
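+   *
+   * <p>A hedged sketch of structured handling, assuming {@code storageError}
+   * was unpacked from the RPC error details:
+   *
+   * <pre>{@code
+   * switch (storageError.getCode()) {
+   *   case STREAM_ALREADY_COMMITTED:
+   *     // A retried commit may have already succeeded; often safe to ignore.
+   *     break;
+   *   case SCHEMA_MISMATCH_EXTRA_FIELDS:
+   *     // Drop or remap fields missing from the table schema, then resend.
+   *     break;
+   *   default:
+   *     // Propagate remaining codes to the caller.
+   *     break;
+   * }
+   * }</pre>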
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode} + */ + public enum StorageErrorCode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Default error.
+     * 
+ * + * STORAGE_ERROR_CODE_UNSPECIFIED = 0; + */ + STORAGE_ERROR_CODE_UNSPECIFIED(0), + /** + * + * + *
+     * Table is not found in the system.
+     * 
+ * + * TABLE_NOT_FOUND = 1; + */ + TABLE_NOT_FOUND(1), + /** + * + * + *
+     * Stream is already committed.
+     * 
+ * + * STREAM_ALREADY_COMMITTED = 2; + */ + STREAM_ALREADY_COMMITTED(2), + /** + * + * + *
+     * Stream is not found.
+     * 
+ * + * STREAM_NOT_FOUND = 3; + */ + STREAM_NOT_FOUND(3), + /** + * + * + *
+     * Invalid stream type.
+     * For example, an attempt to commit a stream that is not pending.
+     * 
+ * + * INVALID_STREAM_TYPE = 4; + */ + INVALID_STREAM_TYPE(4), + /** + * + * + *
+     * Invalid stream state.
+     * For example, an attempt to commit a stream that is not finalized or has
+     * already been garbage collected.
+     * 
+ * + * INVALID_STREAM_STATE = 5; + */ + INVALID_STREAM_STATE(5), + /** + * + * + *
+     * Stream is finalized.
+     * 
+ * + * STREAM_FINALIZED = 6; + */ + STREAM_FINALIZED(6), + /** + * + * + *
+     * There is a schema mismatch: the user-supplied schema contains fields
+     * that are not present in the BigQuery table schema.
+     * 
+ * + * SCHEMA_MISMATCH_EXTRA_FIELDS = 7; + */ + SCHEMA_MISMATCH_EXTRA_FIELDS(7), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Default error.
+     * 
+ * + * STORAGE_ERROR_CODE_UNSPECIFIED = 0; + */ + public static final int STORAGE_ERROR_CODE_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+     * Table is not found in the system.
+     * 
+ * + * TABLE_NOT_FOUND = 1; + */ + public static final int TABLE_NOT_FOUND_VALUE = 1; + /** + * + * + *
+     * Stream is already committed.
+     * 
+ * + * STREAM_ALREADY_COMMITTED = 2; + */ + public static final int STREAM_ALREADY_COMMITTED_VALUE = 2; + /** + * + * + *
+     * Stream is not found.
+     * 
+ * + * STREAM_NOT_FOUND = 3; + */ + public static final int STREAM_NOT_FOUND_VALUE = 3; + /** + * + * + *
+     * Invalid stream type.
+     * For example, an attempt to commit a stream that is not pending.
+     * 
+ * + * INVALID_STREAM_TYPE = 4; + */ + public static final int INVALID_STREAM_TYPE_VALUE = 4; + /** + * + * + *
+     * Invalid stream state.
+     * For example, an attempt to commit a stream that is not finalized or has
+     * already been garbage collected.
+     * 
+ * + * INVALID_STREAM_STATE = 5; + */ + public static final int INVALID_STREAM_STATE_VALUE = 5; + /** + * + * + *
+     * Stream is finalized.
+     * 
+ * + * STREAM_FINALIZED = 6; + */ + public static final int STREAM_FINALIZED_VALUE = 6; + /** + * + * + *
+     * There is a schema mismatch: the user-supplied schema contains fields
+     * that are not present in the BigQuery table schema.
+     * 
+ * + * SCHEMA_MISMATCH_EXTRA_FIELDS = 7; + */ + public static final int SCHEMA_MISMATCH_EXTRA_FIELDS_VALUE = 7; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static StorageErrorCode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static StorageErrorCode forNumber(int value) { + switch (value) { + case 0: + return STORAGE_ERROR_CODE_UNSPECIFIED; + case 1: + return TABLE_NOT_FOUND; + case 2: + return STREAM_ALREADY_COMMITTED; + case 3: + return STREAM_NOT_FOUND; + case 4: + return INVALID_STREAM_TYPE; + case 5: + return INVALID_STREAM_STATE; + case 6: + return STREAM_FINALIZED; + case 7: + return SCHEMA_MISMATCH_EXTRA_FIELDS; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public StorageErrorCode findValueByNumber(int number) { + return StorageErrorCode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageError.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final StorageErrorCode[] VALUES = values(); + + public static StorageErrorCode valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private StorageErrorCode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode) + } + + public static final int CODE_FIELD_NUMBER = 1; + private int code_; + /** + * + * + *
+   * BigQuery Storage-specific error code.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return The enum numeric value on the wire for code. + */ + @java.lang.Override + public int getCodeValue() { + return code_; + } + /** + * + * + *
+   * BigQuery Storage-specific error code.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return The code. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode getCode() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode result = + com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode.valueOf(code_); + return result == null + ? com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode.UNRECOGNIZED + : result; + } + + public static final int ENTITY_FIELD_NUMBER = 2; + private volatile java.lang.Object entity_; + /** + * + * + *
+   * Name of the failed entity.
+   * 
+ * + * string entity = 2; + * + * @return The entity. + */ + @java.lang.Override + public java.lang.String getEntity() { + java.lang.Object ref = entity_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entity_ = s; + return s; + } + } + /** + * + * + *
+   * Name of the failed entity.
+   * 
+ * + * string entity = 2; + * + * @return The bytes for entity. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEntityBytes() { + java.lang.Object ref = entity_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entity_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ERROR_MESSAGE_FIELD_NUMBER = 3; + private volatile java.lang.Object errorMessage_; + /** + * + * + *
+   * Message that describes the error.
+   * 
+ * + * string error_message = 3; + * + * @return The errorMessage. + */ + @java.lang.Override + public java.lang.String getErrorMessage() { + java.lang.Object ref = errorMessage_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + errorMessage_ = s; + return s; + } + } + /** + * + * + *
+   * Message that describes the error.
+   * 
+ * + * string error_message = 3; + * + * @return The bytes for errorMessage. + */ + @java.lang.Override + public com.google.protobuf.ByteString getErrorMessageBytes() { + java.lang.Object ref = errorMessage_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + errorMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (code_ + != com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode + .STORAGE_ERROR_CODE_UNSPECIFIED + .getNumber()) { + output.writeEnum(1, code_); + } + if (!getEntityBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, entity_); + } + if (!getErrorMessageBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, errorMessage_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (code_ + != com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode + .STORAGE_ERROR_CODE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, code_); + } + if (!getEntityBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, entity_); + } + if (!getErrorMessageBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, errorMessage_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.StorageError)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.StorageError other = + (com.google.cloud.bigquery.storage.v1.StorageError) obj; + + if (code_ != other.code_) return false; + if (!getEntity().equals(other.getEntity())) return false; + if (!getErrorMessage().equals(other.getErrorMessage())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CODE_FIELD_NUMBER; + hash = (53 * hash) + code_; + hash = (37 * hash) + ENTITY_FIELD_NUMBER; + hash = (53 * hash) + getEntity().hashCode(); + hash = (37 * hash) + ERROR_MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getErrorMessage().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.StorageError prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Structured custom BigQuery Storage error message. The error can be attached
+   * as error details in the returned rpc Status. In particular, the use of error
+   * codes allows more structured error handling and reduces the need to parse
+   * unstructured error text strings.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.StorageError} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.StorageError) + com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StorageError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StorageError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.StorageError.class, + com.google.cloud.bigquery.storage.v1.StorageError.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.StorageError.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + code_ = 0; + + entity_ = ""; + + errorMessage_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StorageError_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.StorageError.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError build() { + com.google.cloud.bigquery.storage.v1.StorageError result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError buildPartial() { + com.google.cloud.bigquery.storage.v1.StorageError result = + new com.google.cloud.bigquery.storage.v1.StorageError(this); + result.code_ = code_; + result.entity_ = entity_; + result.errorMessage_ = errorMessage_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return 
super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.StorageError) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.StorageError) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.StorageError other) { + if (other == com.google.cloud.bigquery.storage.v1.StorageError.getDefaultInstance()) + return this; + if (other.code_ != 0) { + setCodeValue(other.getCodeValue()); + } + if (!other.getEntity().isEmpty()) { + entity_ = other.entity_; + onChanged(); + } + if (!other.getErrorMessage().isEmpty()) { + errorMessage_ = other.errorMessage_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.StorageError parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1.StorageError) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int code_ = 0; + /** + * + * + *
+     * BigQuery Storage-specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return The enum numeric value on the wire for code. + */ + @java.lang.Override + public int getCodeValue() { + return code_; + } + /** + * + * + *
+     * BigQuery Storage-specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @param value The enum numeric value on the wire for code to set. + * @return This builder for chaining. + */ + public Builder setCodeValue(int value) { + + code_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * BigQuery Storage-specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return The code. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode getCode() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode result = + com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode.valueOf(code_); + return result == null + ? com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode.UNRECOGNIZED + : result; + } + /** + * + * + *
+     * BigQuery Storage-specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @param value The code to set. + * @return This builder for chaining. + */ + public Builder setCode( + com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode value) { + if (value == null) { + throw new NullPointerException(); + } + + code_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+     * BigQuery Storage-specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return This builder for chaining. + */ + public Builder clearCode() { + + code_ = 0; + onChanged(); + return this; + } + + private java.lang.Object entity_ = ""; + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @return The entity. + */ + public java.lang.String getEntity() { + java.lang.Object ref = entity_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entity_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @return The bytes for entity. + */ + public com.google.protobuf.ByteString getEntityBytes() { + java.lang.Object ref = entity_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entity_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @param value The entity to set. + * @return This builder for chaining. + */ + public Builder setEntity(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + entity_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @return This builder for chaining. + */ + public Builder clearEntity() { + + entity_ = getDefaultInstance().getEntity(); + onChanged(); + return this; + } + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @param value The bytes for entity to set. + * @return This builder for chaining. + */ + public Builder setEntityBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + entity_ = value; + onChanged(); + return this; + } + + private java.lang.Object errorMessage_ = ""; + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @return The errorMessage. + */ + public java.lang.String getErrorMessage() { + java.lang.Object ref = errorMessage_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + errorMessage_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @return The bytes for errorMessage. + */ + public com.google.protobuf.ByteString getErrorMessageBytes() { + java.lang.Object ref = errorMessage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + errorMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @param value The errorMessage to set. + * @return This builder for chaining. + */ + public Builder setErrorMessage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + errorMessage_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @return This builder for chaining. + */ + public Builder clearErrorMessage() { + + errorMessage_ = getDefaultInstance().getErrorMessage(); + onChanged(); + return this; + } + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @param value The bytes for errorMessage to set. + * @return This builder for chaining. + */ + public Builder setErrorMessageBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + errorMessage_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.StorageError) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.StorageError) + private static final com.google.cloud.bigquery.storage.v1.StorageError DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.StorageError(); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StorageError parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StorageError(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageErrorOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageErrorOrBuilder.java new file mode 100644 index 0000000000..c483c0fda6 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageErrorOrBuilder.java @@ -0,0 +1,100 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface StorageErrorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.StorageError) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * BigQuery Storage specific error code.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return The enum numeric value on the wire for code. + */ + int getCodeValue(); + /** + * + * + *
+   * BigQuery Storage specific error code.
+   * 
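+   *
+   * <p>Illustrative sketch only (not generated documentation); {@code response} is
+   * assumed to come from a prior {@code batchCommitWriteStreams} call:
+   * <pre>{@code
+   * for (StorageError error : response.getStreamErrorsList()) {
+   *   if (error.getCode() == StorageError.StorageErrorCode.STREAM_ALREADY_COMMITTED) {
+   *     continue; // already durable; assumed safe to skip in this sketch
+   *   }
+   *   throw new RuntimeException(error.getErrorMessage()); // simplistic handling
+   * }
+   * }</pre>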
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return The code. + */ + com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode getCode(); + + /** + * + * + *
+   * Name of the failed entity.
+   * 
+ * + * string entity = 2; + * + * @return The entity. + */ + java.lang.String getEntity(); + /** + * + * + *
+   * Name of the failed entity.
+   * 
+ * + * string entity = 2; + * + * @return The bytes for entity. + */ + com.google.protobuf.ByteString getEntityBytes(); + + /** + * + * + *
+   * Message that describes the error.
+   * 
+ * + * string error_message = 3; + * + * @return The errorMessage. + */ + java.lang.String getErrorMessage(); + /** + * + * + *
+   * Message that describes the error.
+   * 
+ * + * string error_message = 3; + * + * @return The bytes for errorMessage. + */ + com.google.protobuf.ByteString getErrorMessageBytes(); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java index e09d0d6b72..d1f323484c 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java @@ -59,6 +59,58 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamResponse_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_fieldAccessorTable; + static final 
com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_StorageError_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_StorageError_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -75,68 +127,156 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "vior.proto\032\031google/api/resource.proto\032,g" + "oogle/cloud/bigquery/storage/v1/arrow.pr" + "oto\032+google/cloud/bigquery/storage/v1/av" - + "ro.proto\032-google/cloud/bigquery/storage/" - + "v1/stream.proto\"\303\001\n\030CreateReadSessionReq" - + "uest\022C\n\006parent\030\001 \001(\tB3\340A\002\372A-\n+cloudresou" - + "rcemanager.googleapis.com/Project\022H\n\014rea" - + "d_session\030\002 \001(\0132-.google.cloud.bigquery." - + "storage.v1.ReadSessionB\003\340A\002\022\030\n\020max_strea" - + "m_count\030\003 \001(\005\"i\n\017ReadRowsRequest\022F\n\013read" - + "_stream\030\001 \001(\tB1\340A\002\372A+\n)bigquerystorage.g" - + "oogleapis.com/ReadStream\022\016\n\006offset\030\002 \001(\003" - + "\")\n\rThrottleState\022\030\n\020throttle_percent\030\001 " - + "\001(\005\"\227\001\n\013StreamStats\022H\n\010progress\030\002 \001(\01326." - + "google.cloud.bigquery.storage.v1.StreamS" - + "tats.Progress\032>\n\010Progress\022\031\n\021at_response" - + "_start\030\001 \001(\001\022\027\n\017at_response_end\030\002 \001(\001\"\347\003" - + "\n\020ReadRowsResponse\022?\n\tavro_rows\030\003 \001(\0132*." - + "google.cloud.bigquery.storage.v1.AvroRow" - + "sH\000\022P\n\022arrow_record_batch\030\004 \001(\01322.google" - + ".cloud.bigquery.storage.v1.ArrowRecordBa" - + "tchH\000\022\021\n\trow_count\030\006 \001(\003\022<\n\005stats\030\002 \001(\0132" - + "-.google.cloud.bigquery.storage.v1.Strea" - + "mStats\022G\n\016throttle_state\030\005 \001(\0132/.google." - + "cloud.bigquery.storage.v1.ThrottleState\022" - + "H\n\013avro_schema\030\007 \001(\0132,.google.cloud.bigq" - + "uery.storage.v1.AvroSchemaB\003\340A\003H\001\022J\n\014arr" - + "ow_schema\030\010 \001(\0132-.google.cloud.bigquery." 
- + "storage.v1.ArrowSchemaB\003\340A\003H\001B\006\n\004rowsB\010\n" - + "\006schema\"k\n\026SplitReadStreamRequest\022?\n\004nam" - + "e\030\001 \001(\tB1\340A\002\372A+\n)bigquerystorage.googlea" - + "pis.com/ReadStream\022\020\n\010fraction\030\002 \001(\001\"\247\001\n" - + "\027SplitReadStreamResponse\022D\n\016primary_stre" - + "am\030\001 \001(\0132,.google.cloud.bigquery.storage" - + ".v1.ReadStream\022F\n\020remainder_stream\030\002 \001(\013" - + "2,.google.cloud.bigquery.storage.v1.Read" - + "Stream2\306\006\n\014BigQueryRead\022\351\001\n\021CreateReadSe" - + "ssion\022:.google.cloud.bigquery.storage.v1" - + ".CreateReadSessionRequest\032-.google.cloud" - + ".bigquery.storage.v1.ReadSession\"i\202\323\344\223\002<" - + "\"7/v1/{read_session.table=projects/*/dat" - + "asets/*/tables/*}:\001*\332A$parent,read_sessi" - + "on,max_stream_count\022\317\001\n\010ReadRows\0221.googl" - + "e.cloud.bigquery.storage.v1.ReadRowsRequ" - + "est\0322.google.cloud.bigquery.storage.v1.R" - + "eadRowsResponse\"Z\202\323\344\223\002?\022=/v1/{read_strea" - + "m=projects/*/locations/*/sessions/*/stre" - + "ams/*}\332A\022read_stream,offset0\001\022\306\001\n\017SplitR" - + "eadStream\0228.google.cloud.bigquery.storag" - + "e.v1.SplitReadStreamRequest\0329.google.clo" - + "ud.bigquery.storage.v1.SplitReadStreamRe" - + "sponse\">\202\323\344\223\0028\0226/v1/{name=projects/*/loc" - + "ations/*/sessions/*/streams/*}\032\256\001\312A\036bigq" - + "uerystorage.googleapis.com\322A\211\001https://ww" - + "w.googleapis.com/auth/bigquery,https://w" - + "ww.googleapis.com/auth/bigquery.readonly" - + ",https://www.googleapis.com/auth/cloud-p" - + "latformB\235\002\n$com.google.cloud.bigquery.st" - + "orage.v1B\014StorageProtoP\001ZGgoogle.golang." - + "org/genproto/googleapis/cloud/bigquery/s" - + "torage/v1;storage\252\002 Google.Cloud.BigQuer" - + "y.Storage.V1\312\002 Google\\Cloud\\BigQuery\\Sto" - + "rage\\V1\352AU\n\035bigquery.googleapis.com/Tabl" - + "e\0224projects/{project}/datasets/{dataset}" - + "/tables/{table}b\006proto3" + + "ro.proto\032/google/cloud/bigquery/storage/" + + "v1/protobuf.proto\032-google/cloud/bigquery" + + "/storage/v1/stream.proto\032,google/cloud/b" + + "igquery/storage/v1/table.proto\032\037google/p" + + "rotobuf/timestamp.proto\032\036google/protobuf" + + "/wrappers.proto\032\027google/rpc/status.proto" + + "\"\303\001\n\030CreateReadSessionRequest\022C\n\006parent\030" + + "\001 \001(\tB3\340A\002\372A-\n+cloudresourcemanager.goog" + + "leapis.com/Project\022H\n\014read_session\030\002 \001(\013" + + "2-.google.cloud.bigquery.storage.v1.Read" + + "SessionB\003\340A\002\022\030\n\020max_stream_count\030\003 \001(\005\"i" + + "\n\017ReadRowsRequest\022F\n\013read_stream\030\001 \001(\tB1" + + "\340A\002\372A+\n)bigquerystorage.googleapis.com/R" + + "eadStream\022\016\n\006offset\030\002 \001(\003\")\n\rThrottleSta" + + "te\022\030\n\020throttle_percent\030\001 \001(\005\"\227\001\n\013StreamS" + + "tats\022H\n\010progress\030\002 \001(\01326.google.cloud.bi" + + "gquery.storage.v1.StreamStats.Progress\032>" + + "\n\010Progress\022\031\n\021at_response_start\030\001 \001(\001\022\027\n" + + "\017at_response_end\030\002 \001(\001\"\347\003\n\020ReadRowsRespo" + + "nse\022?\n\tavro_rows\030\003 \001(\0132*.google.cloud.bi" + + "gquery.storage.v1.AvroRowsH\000\022P\n\022arrow_re" + + "cord_batch\030\004 \001(\01322.google.cloud.bigquery" + + ".storage.v1.ArrowRecordBatchH\000\022\021\n\trow_co" + + "unt\030\006 \001(\003\022<\n\005stats\030\002 \001(\0132-.google.cloud." 
+ + "bigquery.storage.v1.StreamStats\022G\n\016throt" + + "tle_state\030\005 \001(\0132/.google.cloud.bigquery." + + "storage.v1.ThrottleState\022H\n\013avro_schema\030" + + "\007 \001(\0132,.google.cloud.bigquery.storage.v1" + + ".AvroSchemaB\003\340A\003H\001\022J\n\014arrow_schema\030\010 \001(\013" + + "2-.google.cloud.bigquery.storage.v1.Arro" + + "wSchemaB\003\340A\003H\001B\006\n\004rowsB\010\n\006schema\"k\n\026Spli" + + "tReadStreamRequest\022?\n\004name\030\001 \001(\tB1\340A\002\372A+" + + "\n)bigquerystorage.googleapis.com/ReadStr" + + "eam\022\020\n\010fraction\030\002 \001(\001\"\247\001\n\027SplitReadStrea" + + "mResponse\022D\n\016primary_stream\030\001 \001(\0132,.goog" + + "le.cloud.bigquery.storage.v1.ReadStream\022" + + "F\n\020remainder_stream\030\002 \001(\0132,.google.cloud" + + ".bigquery.storage.v1.ReadStream\"\233\001\n\030Crea" + + "teWriteStreamRequest\0225\n\006parent\030\001 \001(\tB%\340A" + + "\002\372A\037\n\035bigquery.googleapis.com/Table\022H\n\014w" + + "rite_stream\030\002 \001(\0132-.google.cloud.bigquer" + + "y.storage.v1.WriteStreamB\003\340A\002\"\210\003\n\021Append" + + "RowsRequest\022H\n\014write_stream\030\001 \001(\tB2\340A\002\372A" + + ",\n*bigquerystorage.googleapis.com/WriteS" + + "tream\022+\n\006offset\030\002 \001(\0132\033.google.protobuf." + + "Int64Value\022S\n\nproto_rows\030\004 \001(\0132=.google." + + "cloud.bigquery.storage.v1.AppendRowsRequ" + + "est.ProtoDataH\000\022\020\n\010trace_id\030\006 \001(\t\032\214\001\n\tPr" + + "otoData\022D\n\rwriter_schema\030\001 \001(\0132-.google." + + "cloud.bigquery.storage.v1.ProtoSchema\0229\n" + + "\004rows\030\002 \001(\0132+.google.cloud.bigquery.stor" + + "age.v1.ProtoRowsB\006\n\004rows\"\245\002\n\022AppendRowsR" + + "esponse\022Z\n\rappend_result\030\001 \001(\0132A.google." + + "cloud.bigquery.storage.v1.AppendRowsResp" + + "onse.AppendResultH\000\022#\n\005error\030\002 \001(\0132\022.goo" + + "gle.rpc.StatusH\000\022E\n\016updated_schema\030\003 \001(\013" + + "2-.google.cloud.bigquery.storage.v1.Tabl" + + "eSchema\032;\n\014AppendResult\022+\n\006offset\030\001 \001(\0132" + + "\033.google.protobuf.Int64ValueB\n\n\010response" + + "\"Y\n\025GetWriteStreamRequest\022@\n\004name\030\001 \001(\tB" + + "2\340A\002\372A,\n*bigquerystorage.googleapis.com/" + + "WriteStream\"Q\n\036BatchCommitWriteStreamsRe" + + "quest\022\023\n\006parent\030\001 \001(\tB\003\340A\002\022\032\n\rwrite_stre" + + "ams\030\002 \003(\tB\003\340A\002\"\231\001\n\037BatchCommitWriteStrea" + + "msResponse\022/\n\013commit_time\030\001 \001(\0132\032.google" + + ".protobuf.Timestamp\022E\n\rstream_errors\030\002 \003" + + "(\0132..google.cloud.bigquery.storage.v1.St" + + "orageError\"^\n\032FinalizeWriteStreamRequest" + + "\022@\n\004name\030\001 \001(\tB2\340A\002\372A,\n*bigquerystorage." + + "googleapis.com/WriteStream\"0\n\033FinalizeWr" + + "iteStreamResponse\022\021\n\trow_count\030\001 \001(\003\"\211\001\n" + + "\020FlushRowsRequest\022H\n\014write_stream\030\001 \001(\tB" + + "2\340A\002\372A,\n*bigquerystorage.googleapis.com/" + + "WriteStream\022+\n\006offset\030\002 \001(\0132\033.google.pro" + + "tobuf.Int64Value\"#\n\021FlushRowsResponse\022\016\n" + + "\006offset\030\001 \001(\003\"\361\002\n\014StorageError\022M\n\004code\030\001" + + " \001(\0162?.google.cloud.bigquery.storage.v1." 
+ + "StorageError.StorageErrorCode\022\016\n\006entity\030" + + "\002 \001(\t\022\025\n\rerror_message\030\003 \001(\t\"\352\001\n\020Storage" + + "ErrorCode\022\"\n\036STORAGE_ERROR_CODE_UNSPECIF" + + "IED\020\000\022\023\n\017TABLE_NOT_FOUND\020\001\022\034\n\030STREAM_ALR" + + "EADY_COMMITTED\020\002\022\024\n\020STREAM_NOT_FOUND\020\003\022\027" + + "\n\023INVALID_STREAM_TYPE\020\004\022\030\n\024INVALID_STREA" + + "M_STATE\020\005\022\024\n\020STREAM_FINALIZED\020\006\022 \n\034SCHEM" + + "A_MISMATCH_EXTRA_FIELDS\020\0072\306\006\n\014BigQueryRe" + + "ad\022\351\001\n\021CreateReadSession\022:.google.cloud." + + "bigquery.storage.v1.CreateReadSessionReq" + + "uest\032-.google.cloud.bigquery.storage.v1." + + "ReadSession\"i\202\323\344\223\002<\"7/v1/{read_session.t" + + "able=projects/*/datasets/*/tables/*}:\001*\332" + + "A$parent,read_session,max_stream_count\022\317" + + "\001\n\010ReadRows\0221.google.cloud.bigquery.stor" + + "age.v1.ReadRowsRequest\0322.google.cloud.bi" + + "gquery.storage.v1.ReadRowsResponse\"Z\202\323\344\223" + + "\002?\022=/v1/{read_stream=projects/*/location" + + "s/*/sessions/*/streams/*}\332A\022read_stream," + + "offset0\001\022\306\001\n\017SplitReadStream\0228.google.cl" + + "oud.bigquery.storage.v1.SplitReadStreamR" + + "equest\0329.google.cloud.bigquery.storage.v" + + "1.SplitReadStreamResponse\">\202\323\344\223\0028\0226/v1/{" + + "name=projects/*/locations/*/sessions/*/s" + + "treams/*}\032\256\001\312A\036bigquerystorage.googleapi" + + "s.com\322A\211\001https://www.googleapis.com/auth" + + "/bigquery,https://www.googleapis.com/aut" + + "h/bigquery.readonly,https://www.googleap" + + "is.com/auth/cloud-platform2\274\013\n\rBigQueryW" + + "rite\022\327\001\n\021CreateWriteStream\022:.google.clou" + + "d.bigquery.storage.v1.CreateWriteStreamR" + + "equest\032-.google.cloud.bigquery.storage.v" + + "1.WriteStream\"W\202\323\344\223\002;\"+/v1/{parent=proje" + + "cts/*/datasets/*/tables/*}:\014write_stream" + + "\332A\023parent,write_stream\022\322\001\n\nAppendRows\0223." + + "google.cloud.bigquery.storage.v1.AppendR" + + "owsRequest\0324.google.cloud.bigquery.stora" + + "ge.v1.AppendRowsResponse\"U\202\323\344\223\002@\";/v1/{w" + + "rite_stream=projects/*/datasets/*/tables" + + "/*/streams/*}:\001*\332A\014write_stream(\0010\001\022\277\001\n\016" + + "GetWriteStream\0227.google.cloud.bigquery.s" + + "torage.v1.GetWriteStreamRequest\032-.google" + + ".cloud.bigquery.storage.v1.WriteStream\"E" + + "\202\323\344\223\0028\"3/v1/{name=projects/*/datasets/*/" + + "tables/*/streams/*}:\001*\332A\004name\022\331\001\n\023Finali" + + "zeWriteStream\022<.google.cloud.bigquery.st" + + "orage.v1.FinalizeWriteStreamRequest\032=.go" + + "ogle.cloud.bigquery.storage.v1.FinalizeW" + + "riteStreamResponse\"E\202\323\344\223\0028\"3/v1/{name=pr" + + "ojects/*/datasets/*/tables/*/streams/*}:" + + "\001*\332A\004name\022\334\001\n\027BatchCommitWriteStreams\022@." 
+ + "google.cloud.bigquery.storage.v1.BatchCo" + + "mmitWriteStreamsRequest\032A.google.cloud.b" + + "igquery.storage.v1.BatchCommitWriteStrea" + + "msResponse\"<\202\323\344\223\002-\022+/v1/{parent=projects" + + "/*/datasets/*/tables/*}\332A\006parent\022\313\001\n\tFlu" + + "shRows\0222.google.cloud.bigquery.storage.v" + + "1.FlushRowsRequest\0323.google.cloud.bigque" + + "ry.storage.v1.FlushRowsResponse\"U\202\323\344\223\002@\"" + + ";/v1/{write_stream=projects/*/datasets/*" + + "/tables/*/streams/*}:\001*\332A\014write_stream\032\260" + + "\001\312A\036bigquerystorage.googleapis.com\322A\213\001ht" + + "tps://www.googleapis.com/auth/bigquery,h" + + "ttps://www.googleapis.com/auth/bigquery." + + "insertdata,https://www.googleapis.com/au" + + "th/cloud-platformB\235\002\n$com.google.cloud.b" + + "igquery.storage.v1B\014StorageProtoP\001ZGgoog" + + "le.golang.org/genproto/googleapis/cloud/" + + "bigquery/storage/v1;storage\252\002 Google.Clo" + + "ud.BigQuery.Storage.V1\312\002 Google\\Cloud\\Bi" + + "gQuery\\Storage\\V1\352AU\n\035bigquery.googleapi" + + "s.com/Table\0224projects/{project}/datasets" + + "/{dataset}/tables/{table}b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -148,7 +288,12 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.api.ResourceProto.getDescriptor(), com.google.cloud.bigquery.storage.v1.ArrowProto.getDescriptor(), com.google.cloud.bigquery.storage.v1.AvroProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1.ProtoBufProto.getDescriptor(), com.google.cloud.bigquery.storage.v1.StreamProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1.TableProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + com.google.protobuf.WrappersProto.getDescriptor(), + com.google.rpc.StatusProto.getDescriptor(), }); internal_static_google_cloud_bigquery_storage_v1_CreateReadSessionRequest_descriptor = getDescriptor().getMessageTypes().get(0); @@ -224,6 +369,114 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "PrimaryStream", "RemainderStream", }); + internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_descriptor, + new java.lang.String[] { + "Parent", "WriteStream", + }); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor, + new java.lang.String[] { + "WriteStream", "Offset", "ProtoRows", "TraceId", "Rows", + }); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_descriptor = + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_descriptor, + new 
java.lang.String[] { + "WriterSchema", "Rows", + }); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor, + new java.lang.String[] { + "AppendResult", "Error", "UpdatedSchema", "Response", + }); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor = + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor, + new java.lang.String[] { + "Offset", + }); + internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_descriptor, + new java.lang.String[] { + "Parent", "WriteStreams", + }); + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_descriptor, + new java.lang.String[] { + "CommitTime", "StreamErrors", + }); + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_descriptor, + new java.lang.String[] { + "RowCount", + }); + internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + 
internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_descriptor, + new java.lang.String[] { + "WriteStream", "Offset", + }); + internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_descriptor, + new java.lang.String[] { + "Offset", + }); + internal_static_google_cloud_bigquery_storage_v1_StorageError_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_google_cloud_bigquery_storage_v1_StorageError_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_StorageError_descriptor, + new java.lang.String[] { + "Code", "Entity", "ErrorMessage", + }); com.google.protobuf.ExtensionRegistry registry = com.google.protobuf.ExtensionRegistry.newInstance(); registry.add(com.google.api.ClientProto.defaultHost); @@ -241,7 +494,12 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.api.ResourceProto.getDescriptor(); com.google.cloud.bigquery.storage.v1.ArrowProto.getDescriptor(); com.google.cloud.bigquery.storage.v1.AvroProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1.ProtoBufProto.getDescriptor(); com.google.cloud.bigquery.storage.v1.StreamProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1.TableProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.protobuf.WrappersProto.getDescriptor(); + com.google.rpc.StatusProto.getDescriptor(); } // @@protoc_insertion_point(outer_class_scope) diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java index e842d14387..83c4d38fdf 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java @@ -43,6 +43,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_bigquery_storage_v1_ReadStream_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_bigquery_storage_v1_ReadStream_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_WriteStream_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_WriteStream_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -57,45 +61,59 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "1\032\037google/api/field_behavior.proto\032\031goog" + "le/api/resource.proto\032,google/cloud/bigq" + "uery/storage/v1/arrow.proto\032+google/clou" - + "d/bigquery/storage/v1/avro.proto\032\037google" - + "/protobuf/timestamp.proto\"\244\010\n\013ReadSessio" - + "n\022\021\n\004name\030\001 \001(\tB\003\340A\003\0224\n\013expire_time\030\002 \001(" - + "\0132\032.google.protobuf.TimestampB\003\340A\003\022F\n\013da" - + "ta_format\030\003 
\001(\0162,.google.cloud.bigquery." - + "storage.v1.DataFormatB\003\340A\005\022H\n\013avro_schem" - + "a\030\004 \001(\0132,.google.cloud.bigquery.storage." - + "v1.AvroSchemaB\003\340A\003H\000\022J\n\014arrow_schema\030\005 \001" - + "(\0132-.google.cloud.bigquery.storage.v1.Ar" - + "rowSchemaB\003\340A\003H\000\0224\n\005table\030\006 \001(\tB%\340A\005\372A\037\n" - + "\035bigquery.googleapis.com/Table\022Z\n\017table_" - + "modifiers\030\007 \001(\0132<.google.cloud.bigquery." - + "storage.v1.ReadSession.TableModifiersB\003\340" - + "A\001\022Y\n\014read_options\030\010 \001(\0132>.google.cloud." - + "bigquery.storage.v1.ReadSession.TableRea" - + "dOptionsB\003\340A\001\022B\n\007streams\030\n \003(\0132,.google." - + "cloud.bigquery.storage.v1.ReadStreamB\003\340A" - + "\003\022*\n\035estimated_total_bytes_scanned\030\014 \001(\003" - + "B\003\340A\003\032C\n\016TableModifiers\0221\n\rsnapshot_time" - + "\030\001 \001(\0132\032.google.protobuf.Timestamp\032\324\001\n\020T" - + "ableReadOptions\022\027\n\017selected_fields\030\001 \003(\t" - + "\022\027\n\017row_restriction\030\002 \001(\t\022g\n\033arrow_seria" - + "lization_options\030\003 \001(\0132;.google.cloud.bi" - + "gquery.storage.v1.ArrowSerializationOpti" - + "onsB\003\340A\001H\000B%\n#output_format_serializatio" - + "n_options:k\352Ah\n*bigquerystorage.googleap" - + "is.com/ReadSession\022:projects/{project}/l" - + "ocations/{location}/sessions/{session}B\010" - + "\n\006schema\"\234\001\n\nReadStream\022\021\n\004name\030\001 \001(\tB\003\340" - + "A\003:{\352Ax\n)bigquerystorage.googleapis.com/" - + "ReadStream\022Kprojects/{project}/locations" - + "/{location}/sessions/{session}/streams/{" - + "stream}*>\n\nDataFormat\022\033\n\027DATA_FORMAT_UNS" - + "PECIFIED\020\000\022\010\n\004AVRO\020\001\022\t\n\005ARROW\020\002B\304\001\n$com." - + "google.cloud.bigquery.storage.v1B\013Stream" - + "ProtoP\001ZGgoogle.golang.org/genproto/goog" - + "leapis/cloud/bigquery/storage/v1;storage" - + "\252\002 Google.Cloud.BigQuery.Storage.V1\312\002 Go" - + "ogle\\Cloud\\BigQuery\\Storage\\V1b\006proto3" + + "d/bigquery/storage/v1/avro.proto\032,google" + + "/cloud/bigquery/storage/v1/table.proto\032\037" + + "google/protobuf/timestamp.proto\"\244\010\n\013Read" + + "Session\022\021\n\004name\030\001 \001(\tB\003\340A\003\0224\n\013expire_tim" + + "e\030\002 \001(\0132\032.google.protobuf.TimestampB\003\340A\003" + + "\022F\n\013data_format\030\003 \001(\0162,.google.cloud.big" + + "query.storage.v1.DataFormatB\003\340A\005\022H\n\013avro" + + "_schema\030\004 \001(\0132,.google.cloud.bigquery.st" + + "orage.v1.AvroSchemaB\003\340A\003H\000\022J\n\014arrow_sche" + + "ma\030\005 \001(\0132-.google.cloud.bigquery.storage" + + ".v1.ArrowSchemaB\003\340A\003H\000\0224\n\005table\030\006 \001(\tB%\340" + + "A\005\372A\037\n\035bigquery.googleapis.com/Table\022Z\n\017" + + "table_modifiers\030\007 \001(\0132<.google.cloud.big" + + "query.storage.v1.ReadSession.TableModifi" + + "ersB\003\340A\001\022Y\n\014read_options\030\010 \001(\0132>.google." 
+ + "cloud.bigquery.storage.v1.ReadSession.Ta" + + "bleReadOptionsB\003\340A\001\022B\n\007streams\030\n \003(\0132,.g" + + "oogle.cloud.bigquery.storage.v1.ReadStre" + + "amB\003\340A\003\022*\n\035estimated_total_bytes_scanned" + + "\030\014 \001(\003B\003\340A\003\032C\n\016TableModifiers\0221\n\rsnapsho" + + "t_time\030\001 \001(\0132\032.google.protobuf.Timestamp" + + "\032\324\001\n\020TableReadOptions\022\027\n\017selected_fields" + + "\030\001 \003(\t\022\027\n\017row_restriction\030\002 \001(\t\022g\n\033arrow" + + "_serialization_options\030\003 \001(\0132;.google.cl" + + "oud.bigquery.storage.v1.ArrowSerializati" + + "onOptionsB\003\340A\001H\000B%\n#output_format_serial" + + "ization_options:k\352Ah\n*bigquerystorage.go" + + "ogleapis.com/ReadSession\022:projects/{proj" + + "ect}/locations/{location}/sessions/{sess" + + "ion}B\010\n\006schema\"\234\001\n\nReadStream\022\021\n\004name\030\001 " + + "\001(\tB\003\340A\003:{\352Ax\n)bigquerystorage.googleapi" + + "s.com/ReadStream\022Kprojects/{project}/loc" + + "ations/{location}/sessions/{session}/str" + + "eams/{stream}\"\335\003\n\013WriteStream\022\021\n\004name\030\001 " + + "\001(\tB\003\340A\003\022E\n\004type\030\002 \001(\01622.google.cloud.bi" + + "gquery.storage.v1.WriteStream.TypeB\003\340A\005\022" + + "4\n\013create_time\030\003 \001(\0132\032.google.protobuf.T" + + "imestampB\003\340A\003\0224\n\013commit_time\030\004 \001(\0132\032.goo" + + "gle.protobuf.TimestampB\003\340A\003\022H\n\014table_sch" + + "ema\030\005 \001(\0132-.google.cloud.bigquery.storag" + + "e.v1.TableSchemaB\003\340A\003\"F\n\004Type\022\024\n\020TYPE_UN" + + "SPECIFIED\020\000\022\r\n\tCOMMITTED\020\001\022\013\n\007PENDING\020\002\022" + + "\014\n\010BUFFERED\020\003:v\352As\n*bigquerystorage.goog" + + "leapis.com/WriteStream\022Eprojects/{projec" + + "t}/datasets/{dataset}/tables/{table}/str" + + "eams/{stream}*>\n\nDataFormat\022\033\n\027DATA_FORM" + + "AT_UNSPECIFIED\020\000\022\010\n\004AVRO\020\001\022\t\n\005ARROW\020\002B\304\001" + + "\n$com.google.cloud.bigquery.storage.v1B\013" + + "StreamProtoP\001ZGgoogle.golang.org/genprot" + + "o/googleapis/cloud/bigquery/storage/v1;s" + + "torage\252\002 Google.Cloud.BigQuery.Storage.V" + + "1\312\002 Google\\Cloud\\BigQuery\\Storage\\V1b\006pr" + + "oto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -105,6 +123,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.api.ResourceProto.getDescriptor(), com.google.cloud.bigquery.storage.v1.ArrowProto.getDescriptor(), com.google.cloud.bigquery.storage.v1.AvroProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1.TableProto.getDescriptor(), com.google.protobuf.TimestampProto.getDescriptor(), }); internal_static_google_cloud_bigquery_storage_v1_ReadSession_descriptor = @@ -156,6 +175,14 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "Name", }); + internal_static_google_cloud_bigquery_storage_v1_WriteStream_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1_WriteStream_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_WriteStream_descriptor, + new java.lang.String[] { + "Name", "Type", "CreateTime", "CommitTime", "TableSchema", + }); com.google.protobuf.ExtensionRegistry registry = com.google.protobuf.ExtensionRegistry.newInstance(); 
registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); @@ -167,6 +194,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.api.ResourceProto.getDescriptor(); com.google.cloud.bigquery.storage.v1.ArrowProto.getDescriptor(); com.google.cloud.bigquery.storage.v1.AvroProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1.TableProto.getDescriptor(); com.google.protobuf.TimestampProto.getDescriptor(); } diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStats.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStats.java index e784673caa..5a1abc3f37 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStats.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStats.java @@ -22,7 +22,7 @@ * * *
- * Estimated stream statistics for a given Stream.
+ * Estimated stream statistics for a given read Stream.
  * 
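+ *
+ * <p>Editorial note (not generated documentation): consumers typically read
+ * {@code getProgress().getAtResponseEnd()} from the stats attached to each
+ * {@code ReadRowsResponse} to estimate how much of the stream has been served.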
* * Protobuf type {@code google.cloud.bigquery.storage.v1.StreamStats} @@ -1031,7 +1031,7 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
-   * Estimated stream statistics for a given Stream.
+   * Estimated stream statistics for a given read Stream.
    * 
* * Protobuf type {@code google.cloud.bigquery.storage.v1.StreamStats} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java new file mode 100644 index 0000000000..79ec593d6e --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java @@ -0,0 +1,2673 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/table.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * TableFieldSchema defines a single field/column within a table schema.
+ * 
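+ *
+ * <p>Illustrative sketch only (not generated documentation); the field name is a
+ * hypothetical example:
+ * <pre>{@code
+ * TableFieldSchema field =
+ *     TableFieldSchema.newBuilder()
+ *         .setName("customer_name")
+ *         .setType(TableFieldSchema.Type.STRING)
+ *         .setMode(TableFieldSchema.Mode.NULLABLE)
+ *         .build();
+ * }</pre>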
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.TableFieldSchema} + */ +public final class TableFieldSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.TableFieldSchema) + TableFieldSchemaOrBuilder { + private static final long serialVersionUID = 0L; + // Use TableFieldSchema.newBuilder() to construct. + private TableFieldSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableFieldSchema() { + name_ = ""; + type_ = 0; + mode_ = 0; + fields_ = java.util.Collections.emptyList(); + description_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableFieldSchema(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private TableFieldSchema( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 16: + { + int rawValue = input.readEnum(); + + type_ = rawValue; + break; + } + case 24: + { + int rawValue = input.readEnum(); + + mode_ = rawValue; + break; + } + case 34: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + fields_ = + new java.util.ArrayList< + com.google.cloud.bigquery.storage.v1.TableFieldSchema>(); + mutable_bitField0_ |= 0x00000001; + } + fields_.add( + input.readMessage( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.parser(), + extensionRegistry)); + break; + } + case 50: + { + java.lang.String s = input.readStringRequireUtf8(); + + description_ = s; + break; + } + case 56: + { + maxLength_ = input.readInt64(); + break; + } + case 64: + { + precision_ = input.readInt64(); + break; + } + case 72: + { + scale_ = input.readInt64(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
com.google.cloud.bigquery.storage.v1.TableFieldSchema.class, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder.class); + } + + /** Protobuf enum {@code google.cloud.bigquery.storage.v1.TableFieldSchema.Type} */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Illegal value
+     * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
+     * UTF-8 encoded string, up to 64K.
+     * 
+ * + * STRING = 1; + */ + STRING(1), + /** + * + * + *
+     * 64-bit signed integer.
+     * 
+ * + * INT64 = 2; + */ + INT64(2), + /** + * + * + *
+     * 64-bit IEEE floating point
+     * 
+ * + * DOUBLE = 3; + */ + DOUBLE(3), + /** + * + * + *
+     * Aggregate type
+     * 
+ * + * STRUCT = 4; + */ + STRUCT(4), + /** + * + * + *
+     * Binary data, up to 64K.
+     * 
+ * + * BYTES = 5; + */ + BYTES(5), + /** + * + * + *
+     * Two-valued (boolean).
+     * 
+ * + * BOOL = 6; + */ + BOOL(6), + /** + * + * + *
+     * 64-bit signed microseconds since the UTC epoch.
+     * 
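+     *
+     * <p>Editorial sketch (not generated documentation): with {@code java.time}, such a
+     * value can be derived as
+     * <pre>{@code
+     * long micros = java.time.temporal.ChronoUnit.MICROS.between(
+     *     java.time.Instant.EPOCH, instant); // 'instant' is an assumed java.time.Instant
+     * }</pre>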
+ * + * TIMESTAMP = 7; + */ + TIMESTAMP(7), + /** + * + * + *
+     * Civil date - Year, Month, Day
+     * 
+ * + * DATE = 8; + */ + DATE(8), + /** + * + * + *
+     * Civil time - Hour, Minute, Second, Microseconds
+     * 
+ * + * TIME = 9; + */ + TIME(9), + /** + * + * + *
+     * Combination of civil date and civil time
+     * 
+ * + * DATETIME = 10; + */ + DATETIME(10), + /** + * + * + *
+     * Geography object
+     * 
+ * + * GEOGRAPHY = 11; + */ + GEOGRAPHY(11), + /** + * + * + *
+     * Numeric value
+     * 
+ * + * NUMERIC = 12; + */ + NUMERIC(12), + /** + * + * + *
+     * BigNumeric value
+     * 
+ * + * BIGNUMERIC = 13; + */ + BIGNUMERIC(13), + /** + * + * + *
+     * Interval
+     * 
+ * + * INTERVAL = 14; + */ + INTERVAL(14), + /** + * + * + *
+     * JSON, encoded as a string.
+     * 
+ * + * JSON = 15; + */ + JSON(15), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Illegal value
+     * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+     * UTF-8 encoded string, up to 64K.
+     * 
+ * + * STRING = 1; + */ + public static final int STRING_VALUE = 1; + /** + * + * + *
+     * 64-bit signed integer.
+     * 
+ * + * INT64 = 2; + */ + public static final int INT64_VALUE = 2; + /** + * + * + *
+     * 64-bit IEEE floating point
+     * 
+ * + * DOUBLE = 3; + */ + public static final int DOUBLE_VALUE = 3; + /** + * + * + *
+     * Aggregate type
+     * 
+ * + * STRUCT = 4; + */ + public static final int STRUCT_VALUE = 4; + /** + * + * + *
+     * Binary data, up to 64K.
+     * 
+ * + * BYTES = 5; + */ + public static final int BYTES_VALUE = 5; + /** + * + * + *
+     * Two-valued (boolean).
+     * 
+ * + * BOOL = 6; + */ + public static final int BOOL_VALUE = 6; + /** + * + * + *
+     * 64-bit signed microseconds since the UTC epoch.
+     * 
+ * + * TIMESTAMP = 7; + */ + public static final int TIMESTAMP_VALUE = 7; + /** + * + * + *
+     * Civil date - Year, Month, Day
+     * 
+ * + * DATE = 8; + */ + public static final int DATE_VALUE = 8; + /** + * + * + *
+     * Civil time - Hour, Minute, Second, Microseconds
+     * 
+ * + * TIME = 9; + */ + public static final int TIME_VALUE = 9; + /** + * + * + *
+     * Combination of civil date and civil time
+     * 
+ * + * DATETIME = 10; + */ + public static final int DATETIME_VALUE = 10; + /** + * + * + *
+     * Geography object
+     * 
+ * + * GEOGRAPHY = 11; + */ + public static final int GEOGRAPHY_VALUE = 11; + /** + * + * + *
+     * Numeric value
+     * 
+ * + * NUMERIC = 12; + */ + public static final int NUMERIC_VALUE = 12; + /** + * + * + *
+     * BigNumeric value
+     * 
+ * + * BIGNUMERIC = 13; + */ + public static final int BIGNUMERIC_VALUE = 13; + /** + * + * + *
+     * Interval
+     * 
+ * + * INTERVAL = 14; + */ + public static final int INTERVAL_VALUE = 14; + /** + * + * + *
+     * JSON, encoded as a string.
+     * 
+ * + * JSON = 15; + */ + public static final int JSON_VALUE = 15; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return STRING; + case 2: + return INT64; + case 3: + return DOUBLE; + case 4: + return STRUCT; + case 5: + return BYTES; + case 6: + return BOOL; + case 7: + return TIMESTAMP; + case 8: + return DATE; + case 9: + return TIME; + case 10: + return DATETIME; + case 11: + return GEOGRAPHY; + case 12: + return NUMERIC; + case 13: + return BIGNUMERIC; + case 14: + return INTERVAL; + case 15: + return JSON; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.TableFieldSchema.Type) + } + + /** Protobuf enum {@code google.cloud.bigquery.storage.v1.TableFieldSchema.Mode} */ + public enum Mode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Illegal value
+     * 
+ * + * MODE_UNSPECIFIED = 0; + */ + MODE_UNSPECIFIED(0), + /** NULLABLE = 1; */ + NULLABLE(1), + /** REQUIRED = 2; */ + REQUIRED(2), + /** REPEATED = 3; */ + REPEATED(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
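+     *
+     * <p>Editorial note: per the {@code mode} field documentation below, an unset mode
+     * is treated as {@code NULLABLE}.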
+     * Illegal value
+     * 
+ * + * MODE_UNSPECIFIED = 0; + */ + public static final int MODE_UNSPECIFIED_VALUE = 0; + /** NULLABLE = 1; */ + public static final int NULLABLE_VALUE = 1; + /** REQUIRED = 2; */ + public static final int REQUIRED_VALUE = 2; + /** REPEATED = 3; */ + public static final int REPEATED_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Mode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Mode forNumber(int value) { + switch (value) { + case 0: + return MODE_UNSPECIFIED; + case 1: + return NULLABLE; + case 2: + return REQUIRED; + case 3: + return REPEATED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Mode findValueByNumber(int number) { + return Mode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDescriptor() + .getEnumTypes() + .get(1); + } + + private static final Mode[] VALUES = values(); + + public static Mode valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Mode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.TableFieldSchema.Mode) + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+   * Required. The field name. The name must contain only letters (a-z, A-Z),
+   * numbers (0-9), or underscores (_), and must start with a letter or
+   * underscore. The maximum length is 128 characters.
+   * 
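+   *
+   * <p>Editorial note (not generated documentation): the stated rule corresponds to
+   * the regular expression {@code [A-Za-z_][A-Za-z0-9_]{0,127}}.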
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The field name. The name must contain only letters (a-z, A-Z),
+   * numbers (0-9), or underscores (_), and must start with a letter or
+   * underscore. The maximum length is 128 characters.
+   * 
+   *
+   * string name = 1 [(.google.api.field_behavior) = REQUIRED];
+   *
+   * @return The bytes for name.
+   */
+  @java.lang.Override
+  public com.google.protobuf.ByteString getNameBytes() {
+    java.lang.Object ref = name_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+      name_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int TYPE_FIELD_NUMBER = 2;
+  private int type_;
+  /**
+   *
+   *
+   *
+   * Required. The field data type.
+   * 
+   *
+   *
+   * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED];
+   *
+   *
+   * @return The enum numeric value on the wire for type.
+   */
+  @java.lang.Override
+  public int getTypeValue() {
+    return type_;
+  }
+  /**
+   *
+   *
+   *
+   * Required. The field data type.
+   * 
+   *
+   *
+   * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED];
+   *
+   *
+   * @return The type.
+   */
+  @java.lang.Override
+  public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type getType() {
+    @SuppressWarnings("deprecation")
+    com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type result =
+        com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.valueOf(type_);
+    return result == null
+        ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.UNRECOGNIZED
+        : result;
+  }
+
+  public static final int MODE_FIELD_NUMBER = 3;
+  private int mode_;
+  /**
+   *
+   *
+   *
+   * Optional. The field mode. The default value is NULLABLE.
+   * 
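+   * Note the distinction: at the protobuf level an unset mode reads back as
+   * MODE_UNSPECIFIED (numeric value 0, the proto3 default); "defaults to
+   * NULLABLE" describes how BigQuery interprets an unset mode. A sketch (the
+   * column name is illustrative):
+   *
+   * <pre>{@code
+   * TableFieldSchema f = TableFieldSchema.newBuilder().setName("c").build();
+   * // f.getMode() == TableFieldSchema.Mode.MODE_UNSPECIFIED here,
+   * // but BigQuery treats the column as NULLABLE.
+   * }</pre>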
+   *
+   *
+   * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   *
+   * @return The enum numeric value on the wire for mode.
+   */
+  @java.lang.Override
+  public int getModeValue() {
+    return mode_;
+  }
+  /**
+   *
+   *
+   *
+   * Optional. The field mode. The default value is NULLABLE.
+   * 
+   *
+   *
+   * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   *
+   * @return The mode.
+   */
+  @java.lang.Override
+  public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode getMode() {
+    @SuppressWarnings("deprecation")
+    com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode result =
+        com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode.valueOf(mode_);
+    return result == null
+        ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode.UNRECOGNIZED
+        : result;
+  }
+
+  public static final int FIELDS_FIELD_NUMBER = 4;
+  private java.util.List<com.google.cloud.bigquery.storage.v1.TableFieldSchema> fields_;
+  /**
+   *
+   *
+   *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
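+   * A sketch of a STRUCT column with one nested leaf (names are illustrative;
+   * Type.STRUCT and Type.STRING are the constants the documentation above
+   * implies):
+   *
+   * <pre>{@code
+   * TableFieldSchema address =
+   *     TableFieldSchema.newBuilder()
+   *         .setName("address")
+   *         .setType(TableFieldSchema.Type.STRUCT)
+   *         .addFields(
+   *             TableFieldSchema.newBuilder()
+   *                 .setName("city")
+   *                 .setType(TableFieldSchema.Type.STRING))
+   *         .build();
+   * }</pre>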
+   *
+   *
+   * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   */
+  @java.lang.Override
+  public java.util.List<com.google.cloud.bigquery.storage.v1.TableFieldSchema> getFieldsList() {
+    return fields_;
+  }
+  /**
+   *
+   *
+   *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+   *
+   *
+   * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   */
+  @java.lang.Override
+  public java.util.List<? extends com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder>
+      getFieldsOrBuilderList() {
+    return fields_;
+  }
+  /**
+   *
+   *
+   *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+   *
+   *
+   * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   */
+  @java.lang.Override
+  public int getFieldsCount() {
+    return fields_.size();
+  }
+  /**
+   *
+   *
+   *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+   *
+   *
+   * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   */
+  @java.lang.Override
+  public com.google.cloud.bigquery.storage.v1.TableFieldSchema getFields(int index) {
+    return fields_.get(index);
+  }
+  /**
+   *
+   *
+   *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+   *
+   *
+   * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   */
+  @java.lang.Override
+  public com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder getFieldsOrBuilder(
+      int index) {
+    return fields_.get(index);
+  }
+
+  public static final int DESCRIPTION_FIELD_NUMBER = 6;
+  private volatile java.lang.Object description_;
+  /**
+   *
+   *
+   *
+   * Optional. The field description. The maximum length is 1,024 characters.
+   * 
+   *
+   * string description = 6 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   * @return The description.
+   */
+  @java.lang.Override
+  public java.lang.String getDescription() {
+    java.lang.Object ref = description_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      description_ = s;
+      return s;
+    }
+  }
+  /**
+   *
+   *
+   *
+   * Optional. The field description. The maximum length is 1,024 characters.
+   * 
+   *
+   * string description = 6 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   * @return The bytes for description.
+   */
+  @java.lang.Override
+  public com.google.protobuf.ByteString getDescriptionBytes() {
+    java.lang.Object ref = description_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+      description_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int MAX_LENGTH_FIELD_NUMBER = 7;
+  private long maxLength_;
+  /**
+   *
+   *
+   *
+   * Optional. Maximum length of values of this field for STRINGS or BYTES.
+   * If max_length is not specified, no maximum length constraint is imposed
+   * on this field.
+   * If type = "STRING", then max_length represents the maximum UTF-8
+   * length of strings in this field.
+   * If type = "BYTES", then max_length represents the maximum number of
+   * bytes in this field.
+   * It is invalid to set this field if type is not "STRING" or "BYTES".
+   * 
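+   * For example, a STRING column limited to 10 characters (a sketch; the column
+   * name is illustrative and Type.STRING is the constant implied above):
+   *
+   * <pre>{@code
+   * TableFieldSchema code =
+   *     TableFieldSchema.newBuilder()
+   *         .setName("country_code")
+   *         .setType(TableFieldSchema.Type.STRING)
+   *         .setMaxLength(10)
+   *         .build();
+   * }</pre>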
+   *
+   * int64 max_length = 7 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   * @return The maxLength.
+   */
+  @java.lang.Override
+  public long getMaxLength() {
+    return maxLength_;
+  }
+
+  public static final int PRECISION_FIELD_NUMBER = 8;
+  private long precision_;
+  /**
+   *
+   *
+   *
+   * Optional. Precision (maximum number of total digits in base 10) and scale
+   * (maximum number of digits in the fractional part in base 10) constraints
+   * for values of this field for NUMERIC or BIGNUMERIC.
+   * It is invalid to set precision or scale if type is not "NUMERIC" or
+   * "BIGNUMERIC".
+   * If precision and scale are not specified, no value range constraint is
+   * imposed on this field insofar as values are permitted by the type.
+   * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+   * * Precision (P) and scale (S) are specified:
+   *   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+   * * Precision (P) is specified but not scale (and thus scale is
+   *   interpreted to be equal to zero):
+   *   [-10^P + 1, 10^P - 1].
+   * Acceptable values for precision and scale if both are specified:
+   * * If type = "NUMERIC":
+   *   1 <= precision - scale <= 29 and 0 <= scale <= 9.
+   * * If type = "BIGNUMERIC":
+   *   1 <= precision - scale <= 38 and 0 <= scale <= 38.
+   * Acceptable values for precision if only precision is specified but not
+   * scale (and thus scale is interpreted to be equal to zero):
+   * * If type = "NUMERIC": 1 <= precision <= 29.
+   * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+   * If scale is specified but not precision, then it is invalid.
+   * 
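+   * For instance, precision = 10 and scale = 2 yields the range
+   * [-10^8 + 10^-2, 10^8 - 10^-2], i.e. -99999999.99 through 99999999.99.
+   * Such a NUMERIC(10, 2) field could be declared as in this sketch
+   * (Type.NUMERIC is the constant implied by the documentation above):
+   *
+   * <pre>{@code
+   * TableFieldSchema price =
+   *     TableFieldSchema.newBuilder()
+   *         .setName("price")
+   *         .setType(TableFieldSchema.Type.NUMERIC)
+   *         .setPrecision(10)
+   *         .setScale(2)
+   *         .build();
+   * }</pre>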
+   *
+   * int64 precision = 8 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   * @return The precision.
+   */
+  @java.lang.Override
+  public long getPrecision() {
+    return precision_;
+  }
+
+  public static final int SCALE_FIELD_NUMBER = 9;
+  private long scale_;
+  /**
+   *
+   *
+   *
+   * Optional. See documentation for precision.
+   * 
+   *
+   * int64 scale = 9 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   * @return The scale.
+   */
+  @java.lang.Override
+  public long getScale() {
+    return scale_;
+  }
+
+  private byte memoizedIsInitialized = -1;
+
+  @java.lang.Override
+  public final boolean isInitialized() {
+    byte isInitialized = memoizedIsInitialized;
+    if (isInitialized == 1) return true;
+    if (isInitialized == 0) return false;
+
+    memoizedIsInitialized = 1;
+    return true;
+  }
+
+  @java.lang.Override
+  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+    if (!getNameBytes().isEmpty()) {
+      com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_);
+    }
+    if (type_
+        != com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.TYPE_UNSPECIFIED
+            .getNumber()) {
+      output.writeEnum(2, type_);
+    }
+    if (mode_
+        != com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode.MODE_UNSPECIFIED
+            .getNumber()) {
+      output.writeEnum(3, mode_);
+    }
+    for (int i = 0; i < fields_.size(); i++) {
+      output.writeMessage(4, fields_.get(i));
+    }
+    if (!getDescriptionBytes().isEmpty()) {
+      com.google.protobuf.GeneratedMessageV3.writeString(output, 6, description_);
+    }
+    if (maxLength_ != 0L) {
+      output.writeInt64(7, maxLength_);
+    }
+    if (precision_ != 0L) {
+      output.writeInt64(8, precision_);
+    }
+    if (scale_ != 0L) {
+      output.writeInt64(9, scale_);
+    }
+    unknownFields.writeTo(output);
+  }
+
+  @java.lang.Override
+  public int getSerializedSize() {
+    int size = memoizedSize;
+    if (size != -1) return size;
+
+    size = 0;
+    if (!getNameBytes().isEmpty()) {
+      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_);
+    }
+    if (type_
+        != com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.TYPE_UNSPECIFIED
+            .getNumber()) {
+      size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, type_);
+    }
+    if (mode_
+        != com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode.MODE_UNSPECIFIED
+            .getNumber()) {
+      size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, mode_);
+    }
+    for (int i = 0; i < fields_.size(); i++) {
+      size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, fields_.get(i));
+    }
+    if (!getDescriptionBytes().isEmpty()) {
+      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, description_);
+    }
+    if (maxLength_ != 0L) {
+      size += com.google.protobuf.CodedOutputStream.computeInt64Size(7, maxLength_);
+    }
+    if (precision_ != 0L) {
+      size += com.google.protobuf.CodedOutputStream.computeInt64Size(8, precision_);
+    }
+    if (scale_ != 0L) {
+      size += com.google.protobuf.CodedOutputStream.computeInt64Size(9, scale_);
+    }
+    size += unknownFields.getSerializedSize();
+    memoizedSize = size;
+    return size;
+  }
+
+  @java.lang.Override
+  public boolean equals(final java.lang.Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (!(obj instanceof com.google.cloud.bigquery.storage.v1.TableFieldSchema)) {
+      return super.equals(obj);
+    }
+    com.google.cloud.bigquery.storage.v1.TableFieldSchema other =
+        (com.google.cloud.bigquery.storage.v1.TableFieldSchema) obj;
+
+    if (!getName().equals(other.getName())) return false;
+    if (type_ != other.type_) return false;
+    if (mode_ != other.mode_) return false;
+    if (!getFieldsList().equals(other.getFieldsList())) return false;
+    if (!getDescription().equals(other.getDescription())) return false;
+    if (getMaxLength() != other.getMaxLength()) return false;
+    if (getPrecision() != other.getPrecision()) return false;
+    if (getScale() != other.getScale()) return false;
+    if (!unknownFields.equals(other.unknownFields)) return false;
+    return true;
+  }
+
+  @java.lang.Override
+  public int hashCode() {
+    if (memoizedHashCode != 0) {
+      return memoizedHashCode;
+    }
+    int hash = 41;
+    hash = (19 * hash) + getDescriptor().hashCode();
+    hash = (37 * hash) + NAME_FIELD_NUMBER;
+    hash = (53 * hash) + getName().hashCode();
+    hash = (37 * hash) + TYPE_FIELD_NUMBER;
+    hash = (53 * hash) + type_;
+    hash = (37 * hash) + MODE_FIELD_NUMBER;
+    hash = (53 * hash) + mode_;
+    if (getFieldsCount() > 0) {
+      hash = (37 * hash) + FIELDS_FIELD_NUMBER;
+      hash = (53 * hash) + getFieldsList().hashCode();
+    }
+    hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER;
+    hash = (53 * hash) + getDescription().hashCode();
+    hash = (37 * hash) + MAX_LENGTH_FIELD_NUMBER;
+    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getMaxLength());
+    hash = (37 * hash) + PRECISION_FIELD_NUMBER;
+    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getPrecision());
+    hash = (37 * hash) + SCALE_FIELD_NUMBER;
+    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getScale());
+    hash = (29 * hash) + unknownFields.hashCode();
+    memoizedHashCode = hash;
+    return hash;
+  }
+
+  public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom(
+      java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+
+  public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom(
+      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+
+  public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom(
+      com.google.protobuf.ByteString data)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+
+  public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom(
+      com.google.protobuf.ByteString data,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+
+  public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom(byte[] data)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+
+  public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom(
+      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+
+  public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom(
+      java.io.InputStream input) throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+  }
+
+  public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom(
+      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+        PARSER, input, extensionRegistry);
+  }
+
+  public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseDelimitedFrom(
+      java.io.InputStream input) throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+  }
+
+  public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseDelimitedFrom(
+      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+        PARSER, input, extensionRegistry);
+  }
+
+  public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom(
+      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+  }
+
+  public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom(
+      com.google.protobuf.CodedInputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+        PARSER, input, extensionRegistry);
+  }
+
+  @java.lang.Override
+  public Builder newBuilderForType() {
+    return newBuilder();
+  }
+
+  public static Builder newBuilder() {
+    return DEFAULT_INSTANCE.toBuilder();
+  }
+
+  public static Builder newBuilder(
+      com.google.cloud.bigquery.storage.v1.TableFieldSchema prototype) {
+    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+  }
+
+  @java.lang.Override
+  public Builder toBuilder() {
+    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+  }
+
+  @java.lang.Override
+  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+    Builder builder = new Builder(parent);
+    return builder;
+  }
+  /**
+   *
+   *
+   *
+   * TableFieldSchema defines a single field/column within a table schema.
+   * 
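+   * A typical construction sketch using this builder (the column name is
+   * illustrative; Type.STRING is the constant implied by the field
+   * documentation):
+   *
+   * <pre>{@code
+   * TableFieldSchema field =
+   *     TableFieldSchema.newBuilder()
+   *         .setName("word")
+   *         .setType(TableFieldSchema.Type.STRING)
+   *         .setMode(TableFieldSchema.Mode.REQUIRED)
+   *         .build();
+   * }</pre>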
+   *
+   * Protobuf type {@code google.cloud.bigquery.storage.v1.TableFieldSchema}
+   */
+  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
+      implements
+      // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.TableFieldSchema)
+      com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder {
+    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+      return com.google.cloud.bigquery.storage.v1.TableProto
+          .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_descriptor;
+    }
+
+    @java.lang.Override
+    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return com.google.cloud.bigquery.storage.v1.TableProto
+          .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              com.google.cloud.bigquery.storage.v1.TableFieldSchema.class,
+              com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder.class);
+    }
+
+    // Construct using com.google.cloud.bigquery.storage.v1.TableFieldSchema.newBuilder()
+    private Builder() {
+      maybeForceBuilderInitialization();
+    }
+
+    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      super(parent);
+      maybeForceBuilderInitialization();
+    }
+
+    private void maybeForceBuilderInitialization() {
+      if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {
+        getFieldsFieldBuilder();
+      }
+    }
+
+    @java.lang.Override
+    public Builder clear() {
+      super.clear();
+      name_ = "";
+
+      type_ = 0;
+
+      mode_ = 0;
+
+      if (fieldsBuilder_ == null) {
+        fields_ = java.util.Collections.emptyList();
+        bitField0_ = (bitField0_ & ~0x00000001);
+      } else {
+        fieldsBuilder_.clear();
+      }
+      description_ = "";
+
+      maxLength_ = 0L;
+
+      precision_ = 0L;
+
+      scale_ = 0L;
+
+      return this;
+    }
+
+    @java.lang.Override
+    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+      return com.google.cloud.bigquery.storage.v1.TableProto
+          .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_descriptor;
+    }
+
+    @java.lang.Override
+    public com.google.cloud.bigquery.storage.v1.TableFieldSchema getDefaultInstanceForType() {
+      return com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDefaultInstance();
+    }
+
+    @java.lang.Override
+    public com.google.cloud.bigquery.storage.v1.TableFieldSchema build() {
+      com.google.cloud.bigquery.storage.v1.TableFieldSchema result = buildPartial();
+      if (!result.isInitialized()) {
+        throw newUninitializedMessageException(result);
+      }
+      return result;
+    }
+
+    @java.lang.Override
+    public com.google.cloud.bigquery.storage.v1.TableFieldSchema buildPartial() {
+      com.google.cloud.bigquery.storage.v1.TableFieldSchema result =
+          new com.google.cloud.bigquery.storage.v1.TableFieldSchema(this);
+      int from_bitField0_ = bitField0_;
+      result.name_ = name_;
+      result.type_ = type_;
+      result.mode_ = mode_;
+      if (fieldsBuilder_ == null) {
+        if (((bitField0_ & 0x00000001) != 0)) {
+          fields_ = java.util.Collections.unmodifiableList(fields_);
+          bitField0_ = (bitField0_ & ~0x00000001);
+        }
+        result.fields_ = fields_;
+      } else {
+        result.fields_ = fieldsBuilder_.build();
+      }
+      result.description_ = description_;
+      result.maxLength_ = maxLength_;
+      result.precision_ = precision_;
+      result.scale_ = scale_;
+      onBuilt();
+      return result;
+    }
+
+    @java.lang.Override
+    public Builder clone() {
+      return super.clone();
+    }
+
+    @java.lang.Override
+    public Builder setField(
+        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+      return super.setField(field, value);
+    }
+
+    @java.lang.Override
+    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+      return super.clearField(field);
+    }
+
+    @java.lang.Override
+    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+      return super.clearOneof(oneof);
+    }
+
+    @java.lang.Override
+    public Builder setRepeatedField(
+        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
+      return super.setRepeatedField(field, index, value);
+    }
+
+    @java.lang.Override
+    public Builder addRepeatedField(
+        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+      return super.addRepeatedField(field, value);
+    }
+
+    @java.lang.Override
+    public Builder mergeFrom(com.google.protobuf.Message other) {
+      if (other instanceof com.google.cloud.bigquery.storage.v1.TableFieldSchema) {
+        return mergeFrom((com.google.cloud.bigquery.storage.v1.TableFieldSchema) other);
+      } else {
+        super.mergeFrom(other);
+        return this;
+      }
+    }
+
+    public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.TableFieldSchema other) {
+      if (other == com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDefaultInstance())
+        return this;
+      if (!other.getName().isEmpty()) {
+        name_ = other.name_;
+        onChanged();
+      }
+      if (other.type_ != 0) {
+        setTypeValue(other.getTypeValue());
+      }
+      if (other.mode_ != 0) {
+        setModeValue(other.getModeValue());
+      }
+      if (fieldsBuilder_ == null) {
+        if (!other.fields_.isEmpty()) {
+          if (fields_.isEmpty()) {
+            fields_ = other.fields_;
+            bitField0_ = (bitField0_ & ~0x00000001);
+          } else {
+            ensureFieldsIsMutable();
+            fields_.addAll(other.fields_);
+          }
+          onChanged();
+        }
+      } else {
+        if (!other.fields_.isEmpty()) {
+          if (fieldsBuilder_.isEmpty()) {
+            fieldsBuilder_.dispose();
+            fieldsBuilder_ = null;
+            fields_ = other.fields_;
+            bitField0_ = (bitField0_ & ~0x00000001);
+            fieldsBuilder_ =
+                com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
+                    ? getFieldsFieldBuilder()
+                    : null;
+          } else {
+            fieldsBuilder_.addAllMessages(other.fields_);
+          }
+        }
+      }
+      if (!other.getDescription().isEmpty()) {
+        description_ = other.description_;
+        onChanged();
+      }
+      if (other.getMaxLength() != 0L) {
+        setMaxLength(other.getMaxLength());
+      }
+      if (other.getPrecision() != 0L) {
+        setPrecision(other.getPrecision());
+      }
+      if (other.getScale() != 0L) {
+        setScale(other.getScale());
+      }
+      this.mergeUnknownFields(other.unknownFields);
+      onChanged();
+      return this;
+    }
+
+    @java.lang.Override
+    public final boolean isInitialized() {
+      return true;
+    }
+
+    @java.lang.Override
+    public Builder mergeFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      com.google.cloud.bigquery.storage.v1.TableFieldSchema parsedMessage = null;
+      try {
+        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        parsedMessage =
+            (com.google.cloud.bigquery.storage.v1.TableFieldSchema) e.getUnfinishedMessage();
+        throw e.unwrapIOException();
+      } finally {
+        if (parsedMessage != null) {
+          mergeFrom(parsedMessage);
+        }
+      }
+      return this;
+    }
+
+    private int bitField0_;
+
+    private java.lang.Object name_ = "";
+    /**
+     *
+     *
+     *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+     *
+     * string name = 1 [(.google.api.field_behavior) = REQUIRED];
+     *
+     * @return The name.
+     */
+    public java.lang.String getName() {
+      java.lang.Object ref = name_;
+      if (!(ref instanceof java.lang.String)) {
+        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        name_ = s;
+        return s;
+      } else {
+        return (java.lang.String) ref;
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+     *
+     * string name = 1 [(.google.api.field_behavior) = REQUIRED];
+     *
+     * @return The bytes for name.
+     */
+    public com.google.protobuf.ByteString getNameBytes() {
+      java.lang.Object ref = name_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+        name_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+     *
+     * string name = 1 [(.google.api.field_behavior) = REQUIRED];
+     *
+     * @param value The name to set.
+     * @return This builder for chaining.
+     */
+    public Builder setName(java.lang.String value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+
+      name_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+     *
+     * string name = 1 [(.google.api.field_behavior) = REQUIRED];
+     *
+     * @return This builder for chaining.
+     */
+    public Builder clearName() {
+
+      name_ = getDefaultInstance().getName();
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+     *
+     * string name = 1 [(.google.api.field_behavior) = REQUIRED];
+     *
+     * @param value The bytes for name to set.
+     * @return This builder for chaining.
+     */
+    public Builder setNameBytes(com.google.protobuf.ByteString value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      checkByteStringIsUtf8(value);
+
+      name_ = value;
+      onChanged();
+      return this;
+    }
+
+    private int type_ = 0;
+    /**
+     *
+     *
+     *
+     * Required. The field data type.
+     * 
+     *
+     *
+     * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED];
+     *
+     *
+     * @return The enum numeric value on the wire for type.
+     */
+    @java.lang.Override
+    public int getTypeValue() {
+      return type_;
+    }
+    /**
+     *
+     *
+     *
+     * Required. The field data type.
+     * 
+     *
+     *
+     * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED];
+     *
+     *
+     * @param value The enum numeric value on the wire for type to set.
+     * @return This builder for chaining.
+     */
+    public Builder setTypeValue(int value) {
+
+      type_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Required. The field data type.
+     * 
+     *
+     *
+     * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED];
+     *
+     *
+     * @return The type.
+     */
+    @java.lang.Override
+    public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type getType() {
+      @SuppressWarnings("deprecation")
+      com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type result =
+          com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.valueOf(type_);
+      return result == null
+          ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.UNRECOGNIZED
+          : result;
+    }
+    /**
+     *
+     *
+     *
+     * Required. The field data type.
+     * 
+     *
+     *
+     * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED];
+     *
+     *
+     * @param value The type to set.
+     * @return This builder for chaining.
+     */
+    public Builder setType(com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+
+      type_ = value.getNumber();
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Required. The field data type.
+     * 
+     *
+     *
+     * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED];
+     *
+     *
+     * @return This builder for chaining.
+     */
+    public Builder clearType() {
+
+      type_ = 0;
+      onChanged();
+      return this;
+    }
+
+    private int mode_ = 0;
+    /**
+     *
+     *
+     *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+     *
+     *
+     * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     *
+     * @return The enum numeric value on the wire for mode.
+     */
+    @java.lang.Override
+    public int getModeValue() {
+      return mode_;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+     *
+     *
+     * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     *
+     * @param value The enum numeric value on the wire for mode to set.
+     * @return This builder for chaining.
+     */
+    public Builder setModeValue(int value) {
+
+      mode_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+     *
+     *
+     * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     *
+     * @return The mode.
+     */
+    @java.lang.Override
+    public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode getMode() {
+      @SuppressWarnings("deprecation")
+      com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode result =
+          com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode.valueOf(mode_);
+      return result == null
+          ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode.UNRECOGNIZED
+          : result;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+     *
+     *
+     * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     *
+     * @param value The mode to set.
+     * @return This builder for chaining.
+     */
+    public Builder setMode(com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+
+      mode_ = value.getNumber();
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+     *
+     *
+     * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     *
+     * @return This builder for chaining.
+     */
+    public Builder clearMode() {
+
+      mode_ = 0;
+      onChanged();
+      return this;
+    }
+
+    private java.util.List<com.google.cloud.bigquery.storage.v1.TableFieldSchema> fields_ =
+        java.util.Collections.emptyList();
+
+    private void ensureFieldsIsMutable() {
+      if (!((bitField0_ & 0x00000001) != 0)) {
+        fields_ =
+            new java.util.ArrayList<com.google.cloud.bigquery.storage.v1.TableFieldSchema>(
+                fields_);
+        bitField0_ |= 0x00000001;
+      }
+    }
+
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+            com.google.cloud.bigquery.storage.v1.TableFieldSchema,
+            com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder,
+            com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder>
+        fieldsBuilder_;
+
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public java.util.List<com.google.cloud.bigquery.storage.v1.TableFieldSchema> getFieldsList() {
+      if (fieldsBuilder_ == null) {
+        return java.util.Collections.unmodifiableList(fields_);
+      } else {
+        return fieldsBuilder_.getMessageList();
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public int getFieldsCount() {
+      if (fieldsBuilder_ == null) {
+        return fields_.size();
+      } else {
+        return fieldsBuilder_.getCount();
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public com.google.cloud.bigquery.storage.v1.TableFieldSchema getFields(int index) {
+      if (fieldsBuilder_ == null) {
+        return fields_.get(index);
+      } else {
+        return fieldsBuilder_.getMessage(index);
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public Builder setFields(
+        int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema value) {
+      if (fieldsBuilder_ == null) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureFieldsIsMutable();
+        fields_.set(index, value);
+        onChanged();
+      } else {
+        fieldsBuilder_.setMessage(index, value);
+      }
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public Builder setFields(
+        int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder builderForValue) {
+      if (fieldsBuilder_ == null) {
+        ensureFieldsIsMutable();
+        fields_.set(index, builderForValue.build());
+        onChanged();
+      } else {
+        fieldsBuilder_.setMessage(index, builderForValue.build());
+      }
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public Builder addFields(com.google.cloud.bigquery.storage.v1.TableFieldSchema value) {
+      if (fieldsBuilder_ == null) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureFieldsIsMutable();
+        fields_.add(value);
+        onChanged();
+      } else {
+        fieldsBuilder_.addMessage(value);
+      }
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public Builder addFields(
+        int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema value) {
+      if (fieldsBuilder_ == null) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureFieldsIsMutable();
+        fields_.add(index, value);
+        onChanged();
+      } else {
+        fieldsBuilder_.addMessage(index, value);
+      }
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public Builder addFields(
+        com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder builderForValue) {
+      if (fieldsBuilder_ == null) {
+        ensureFieldsIsMutable();
+        fields_.add(builderForValue.build());
+        onChanged();
+      } else {
+        fieldsBuilder_.addMessage(builderForValue.build());
+      }
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public Builder addFields(
+        int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder builderForValue) {
+      if (fieldsBuilder_ == null) {
+        ensureFieldsIsMutable();
+        fields_.add(index, builderForValue.build());
+        onChanged();
+      } else {
+        fieldsBuilder_.addMessage(index, builderForValue.build());
+      }
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public Builder addAllFields(
+        java.lang.Iterable<? extends com.google.cloud.bigquery.storage.v1.TableFieldSchema>
+            values) {
+      if (fieldsBuilder_ == null) {
+        ensureFieldsIsMutable();
+        com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_);
+        onChanged();
+      } else {
+        fieldsBuilder_.addAllMessages(values);
+      }
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public Builder clearFields() {
+      if (fieldsBuilder_ == null) {
+        fields_ = java.util.Collections.emptyList();
+        bitField0_ = (bitField0_ & ~0x00000001);
+        onChanged();
+      } else {
+        fieldsBuilder_.clear();
+      }
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public Builder removeFields(int index) {
+      if (fieldsBuilder_ == null) {
+        ensureFieldsIsMutable();
+        fields_.remove(index);
+        onChanged();
+      } else {
+        fieldsBuilder_.remove(index);
+      }
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder getFieldsBuilder(
+        int index) {
+      return getFieldsFieldBuilder().getBuilder(index);
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder getFieldsOrBuilder(
+        int index) {
+      if (fieldsBuilder_ == null) {
+        return fields_.get(index);
+      } else {
+        return fieldsBuilder_.getMessageOrBuilder(index);
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public java.util.List<? extends com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder>
+        getFieldsOrBuilderList() {
+      if (fieldsBuilder_ != null) {
+        return fieldsBuilder_.getMessageOrBuilderList();
+      } else {
+        return java.util.Collections.unmodifiableList(fields_);
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder addFieldsBuilder() {
+      return getFieldsFieldBuilder()
+          .addBuilder(com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDefaultInstance());
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder addFieldsBuilder(
+        int index) {
+      return getFieldsFieldBuilder()
+          .addBuilder(
+              index, com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDefaultInstance());
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
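+     * Builders returned by the *FieldsBuilder methods mutate the parent builder
+     * in place, so nested fields can be edited without rebuilding the list,
+     * e.g. (a sketch; the names are illustrative):
+     *
+     * <pre>{@code
+     * TableFieldSchema.Builder struct = TableFieldSchema.newBuilder();
+     * struct.addFieldsBuilder().setName("city");
+     * struct.getFieldsBuilder(0).setDescription("City name");
+     * }</pre>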
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public java.util.List<com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder>
+        getFieldsBuilderList() {
+      return getFieldsFieldBuilder().getBuilderList();
+    }
+
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+            com.google.cloud.bigquery.storage.v1.TableFieldSchema,
+            com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder,
+            com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder>
+        getFieldsFieldBuilder() {
+      if (fieldsBuilder_ == null) {
+        fieldsBuilder_ =
+            new com.google.protobuf.RepeatedFieldBuilderV3<
+                com.google.cloud.bigquery.storage.v1.TableFieldSchema,
+                com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder,
+                com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder>(
+                fields_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean());
+        fields_ = null;
+      }
+      return fieldsBuilder_;
+    }
+
+    private java.lang.Object description_ = "";
+    /**
+     *
+     *
+     *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+     *
+     * string description = 6 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @return The description.
+     */
+    public java.lang.String getDescription() {
+      java.lang.Object ref = description_;
+      if (!(ref instanceof java.lang.String)) {
+        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        description_ = s;
+        return s;
+      } else {
+        return (java.lang.String) ref;
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+     *
+     * string description = 6 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @return The bytes for description.
+     */
+    public com.google.protobuf.ByteString getDescriptionBytes() {
+      java.lang.Object ref = description_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+        description_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+     *
+     * string description = 6 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @param value The description to set.
+     * @return This builder for chaining.
+     */
+    public Builder setDescription(java.lang.String value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+
+      description_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+     *
+     * string description = 6 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @return This builder for chaining.
+     */
+    public Builder clearDescription() {
+
+      description_ = getDefaultInstance().getDescription();
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+     *
+     * string description = 6 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @param value The bytes for description to set.
+     * @return This builder for chaining.
+     */
+    public Builder setDescriptionBytes(com.google.protobuf.ByteString value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      checkByteStringIsUtf8(value);
+
+      description_ = value;
+      onChanged();
+      return this;
+    }
+
+    private long maxLength_;
+    /**
+     *
+     *
+     *
+     * Optional. Maximum length of values of this field for STRINGS or BYTES.
+     * If max_length is not specified, no maximum length constraint is imposed
+     * on this field.
+     * If type = "STRING", then max_length represents the maximum UTF-8
+     * length of strings in this field.
+     * If type = "BYTES", then max_length represents the maximum number of
+     * bytes in this field.
+     * It is invalid to set this field if type is not "STRING" or "BYTES".
+     * 
+     *
+     * int64 max_length = 7 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @return The maxLength.
+     */
+    @java.lang.Override
+    public long getMaxLength() {
+      return maxLength_;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Maximum length of values of this field for STRINGS or BYTES.
+     * If max_length is not specified, no maximum length constraint is imposed
+     * on this field.
+     * If type = "STRING", then max_length represents the maximum UTF-8
+     * length of strings in this field.
+     * If type = "BYTES", then max_length represents the maximum number of
+     * bytes in this field.
+     * It is invalid to set this field if type is not "STRING" or "BYTES".
+     * 
+     *
+     * int64 max_length = 7 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @param value The maxLength to set.
+     * @return This builder for chaining.
+     */
+    public Builder setMaxLength(long value) {
+
+      maxLength_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Maximum length of values of this field for STRINGS or BYTES.
+     * If max_length is not specified, no maximum length constraint is imposed
+     * on this field.
+     * If type = "STRING", then max_length represents the maximum UTF-8
+     * length of strings in this field.
+     * If type = "BYTES", then max_length represents the maximum number of
+     * bytes in this field.
+     * It is invalid to set this field if type is not "STRING" or "BYTES".
+     * 
+     *
+     * int64 max_length = 7 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @return This builder for chaining.
+     */
+    public Builder clearMaxLength() {
+
+      maxLength_ = 0L;
+      onChanged();
+      return this;
+    }
+
+    private long precision_;
+    /**
+     *
+     *
+     *
+     * Optional. Precision (maximum number of total digits in base 10) and scale
+     * (maximum number of digits in the fractional part in base 10) constraints
+     * for values of this field for NUMERIC or BIGNUMERIC.
+     * It is invalid to set precision or scale if type is not "NUMERIC" or
+     * "BIGNUMERIC".
+     * If precision and scale are not specified, no value range constraint is
+     * imposed on this field insofar as values are permitted by the type.
+     * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+     * * Precision (P) and scale (S) are specified:
+     *   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+     * * Precision (P) is specified but not scale (and thus scale is
+     *   interpreted to be equal to zero):
+     *   [-10^P + 1, 10^P - 1].
+     * Acceptable values for precision and scale if both are specified:
+     * * If type = "NUMERIC":
+     *   1 <= precision - scale <= 29 and 0 <= scale <= 9.
+     * * If type = "BIGNUMERIC":
+     *   1 <= precision - scale <= 38 and 0 <= scale <= 38.
+     * Acceptable values for precision if only precision is specified but not
+     * scale (and thus scale is interpreted to be equal to zero):
+     * * If type = "NUMERIC": 1 <= precision <= 29.
+     * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+     * If scale is specified but not precision, then it is invalid.
+     * 
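+     * As a worked check of the range rule: with precision 5 and an unset scale
+     * (interpreted as zero), the permitted values are [-10^5 + 1, 10^5 - 1],
+     * i.e. -99999 through 99999. A sketch:
+     *
+     * <pre>{@code
+     * TableFieldSchema.Builder b =
+     *     TableFieldSchema.newBuilder().setPrecision(5); // scale defaults to 0
+     * }</pre>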
+     *
+     * int64 precision = 8 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @return The precision.
+     */
+    @java.lang.Override
+    public long getPrecision() {
+      return precision_;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Precision (maximum number of total digits in base 10) and scale
+     * (maximum number of digits in the fractional part in base 10) constraints
+     * for values of this field for NUMERIC or BIGNUMERIC.
+     * It is invalid to set precision or scale if type is not "NUMERIC" or
+     * "BIGNUMERIC".
+     * If precision and scale are not specified, no value range constraint is
+     * imposed on this field insofar as values are permitted by the type.
+     * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+     * * Precision (P) and scale (S) are specified:
+     *   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+     * * Precision (P) is specified but not scale (and thus scale is
+     *   interpreted to be equal to zero):
+     *   [-10^P + 1, 10^P - 1].
+     * Acceptable values for precision and scale if both are specified:
+     * * If type = "NUMERIC":
+     *   1 <= precision - scale <= 29 and 0 <= scale <= 9.
+     * * If type = "BIGNUMERIC":
+     *   1 <= precision - scale <= 38 and 0 <= scale <= 38.
+     * Acceptable values for precision if only precision is specified but not
+     * scale (and thus scale is interpreted to be equal to zero):
+     * * If type = "NUMERIC": 1 <= precision <= 29.
+     * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+     * If scale is specified but not precision, then it is invalid.
+     * 
+     *
+     * int64 precision = 8 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @param value The precision to set.
+     * @return This builder for chaining.
+     */
+    public Builder setPrecision(long value) {
+
+      precision_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Precision (maximum number of total digits in base 10) and scale
+     * (maximum number of digits in the fractional part in base 10) constraints
+     * for values of this field for NUMERIC or BIGNUMERIC.
+     * It is invalid to set precision or scale if type is not "NUMERIC" or
+     * "BIGNUMERIC".
+     * If precision and scale are not specified, no value range constraint is
+     * imposed on this field insofar as values are permitted by the type.
+     * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+     * * Precision (P) and scale (S) are specified:
+     *   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+     * * Precision (P) is specified but not scale (and thus scale is
+     *   interpreted to be equal to zero):
+     *   [-10^P + 1, 10^P - 1].
+     * Acceptable values for precision and scale if both are specified:
+     * * If type = "NUMERIC":
+     *   1 <= precision - scale <= 29 and 0 <= scale <= 9.
+     * * If type = "BIGNUMERIC":
+     *   1 <= precision - scale <= 38 and 0 <= scale <= 38.
+     * Acceptable values for precision if only precision is specified but not
+     * scale (and thus scale is interpreted to be equal to zero):
+     * * If type = "NUMERIC": 1 <= precision <= 29.
+     * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+     * If scale is specified but not precision, then it is invalid.
+     * 
+     *
+     * int64 precision = 8 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @return This builder for chaining.
+     */
+    public Builder clearPrecision() {
+
+      precision_ = 0L;
+      onChanged();
+      return this;
+    }
+
+    private long scale_;
+    /**
+     *
+     *
+     *
+     * Optional. See documentation for precision.
+     * 
+     *
+     * int64 scale = 9 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @return The scale.
+     */
+    @java.lang.Override
+    public long getScale() {
+      return scale_;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. See documentation for precision.
+     * 
+     *
+     * int64 scale = 9 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @param value The scale to set.
+     * @return This builder for chaining.
+     */
+    public Builder setScale(long value) {
+
+      scale_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Optional. See documentation for precision.
+     * 
+     *
+     * int64 scale = 9 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     * @return This builder for chaining.
+     */
+    public Builder clearScale() {
+
+      scale_ = 0L;
+      onChanged();
+      return this;
+    }
+
+    @java.lang.Override
+    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.setUnknownFields(unknownFields);
+    }
+
+    @java.lang.Override
+    public final Builder mergeUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.mergeUnknownFields(unknownFields);
+    }
+
+    // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.TableFieldSchema)
+  }
+
+  // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.TableFieldSchema)
+  private static final com.google.cloud.bigquery.storage.v1.TableFieldSchema DEFAULT_INSTANCE;
+
+  static {
+    DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.TableFieldSchema();
+  }
+
+  public static com.google.cloud.bigquery.storage.v1.TableFieldSchema getDefaultInstance() {
+    return DEFAULT_INSTANCE;
+  }
+
+  private static final com.google.protobuf.Parser<TableFieldSchema> PARSER =
+      new com.google.protobuf.AbstractParser<TableFieldSchema>() {
+        @java.lang.Override
+        public TableFieldSchema parsePartialFrom(
+            com.google.protobuf.CodedInputStream input,
+            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+            throws com.google.protobuf.InvalidProtocolBufferException {
+          return new TableFieldSchema(input, extensionRegistry);
+        }
+      };
+
+  public static com.google.protobuf.Parser<TableFieldSchema> parser() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.protobuf.Parser<TableFieldSchema> getParserForType() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.cloud.bigquery.storage.v1.TableFieldSchema getDefaultInstanceForType() {
+    return DEFAULT_INSTANCE;
+  }
+}
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java
new file mode 100644
index 0000000000..d011684437
--- /dev/null
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/cloud/bigquery/storage/v1/table.proto
+
+package com.google.cloud.bigquery.storage.v1;
+
+public interface TableFieldSchemaOrBuilder
+    extends
+    // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.TableFieldSchema)
+    com.google.protobuf.MessageOrBuilder {
+
+  /**
+   *
+   *
+   *
+   * Required. The field name. The name must contain only letters (a-z, A-Z),
+   * numbers (0-9), or underscores (_), and must start with a letter or
+   * underscore. The maximum length is 128 characters.
+   * 
+   *
+   * string name = 1 [(.google.api.field_behavior) = REQUIRED];
+   *
+   * @return The name.
+   */
+  java.lang.String getName();
+  /**
+   *
+   *
+   *
+   * Required. The field name. The name must contain only letters (a-z, A-Z),
+   * numbers (0-9), or underscores (_), and must start with a letter or
+   * underscore. The maximum length is 128 characters.
+   * 
+   *
+   * string name = 1 [(.google.api.field_behavior) = REQUIRED];
+   *
+   * @return The bytes for name.
+   */
+  com.google.protobuf.ByteString getNameBytes();
+
+  /**
+   *
+   *
+   *
+   * Required. The field data type.
+   * 
+   *
+   *
+   * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED];
+   *
+   *
+   * @return The enum numeric value on the wire for type.
+   */
+  int getTypeValue();
+  /**
+   *
+   *
+   *
+   * Required. The field data type.
+   * 
+   *
+   *
+   * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED];
+   *
+   *
+   * @return The type.
+   */
+  com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type getType();
+
+  /**
+   *
+   *
+   *
+   * Optional. The field mode. The default value is NULLABLE.
+   * 
+   *
+   *
+   * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   *
+   * @return The enum numeric value on the wire for mode.
+   */
+  int getModeValue();
+  /**
+   *
+   *
+   *
+   * Optional. The field mode. The default value is NULLABLE.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mode. + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode getMode(); + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
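+   *
+   * For example, a minimal sketch of a STRUCT field with one nested leaf
+   * (assuming the standard generated builder setters):
+   *   TableFieldSchema address =
+   *       TableFieldSchema.newBuilder()
+   *           .setName("address")
+   *           .setType(TableFieldSchema.Type.STRUCT)
+   *           .addFields(
+   *               TableFieldSchema.newBuilder()
+   *                   .setName("zip")
+   *                   .setType(TableFieldSchema.Type.STRING))
+   *           .build();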
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getFieldsList(); + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchema getFields(int index); + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getFieldsCount(); + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getFieldsOrBuilderList(); + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder getFieldsOrBuilder(int index); + + /** + * + * + *
+   * Optional. The field description. The maximum length is 1,024 characters.
+   * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + java.lang.String getDescription(); + /** + * + * + *
+   * Optional. The field description. The maximum length is 1,024 characters.
+   * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + com.google.protobuf.ByteString getDescriptionBytes(); + + /** + * + * + *
+   * Optional. Maximum length of values of this field for STRING or BYTES.
+   * If max_length is not specified, no maximum length constraint is imposed
+   * on this field.
+   * If type = "STRING", then max_length represents the maximum UTF-8
+   * length of strings in this field.
+   * If type = "BYTES", then max_length represents the maximum number of
+   * bytes in this field.
+   * It is invalid to set this field if type is not "STRING" or "BYTES".
+   * 
+ * + * int64 max_length = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The maxLength. + */ + long getMaxLength(); + + /** + * + * + *
+   * Optional. Precision (maximum number of total digits in base 10) and scale
+   * (maximum number of digits in the fractional part in base 10) constraints
+   * for values of this field for NUMERIC or BIGNUMERIC.
+   * It is invalid to set precision or scale if type is not "NUMERIC" or
+   * "BIGNUMERIC".
+   * If precision and scale are not specified, no value range constraint is
+   * imposed on this field insofar as values are permitted by the type.
+   * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+   * * Precision (P) and scale (S) are specified:
+   *   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+   * * Precision (P) is specified but not scale (and thus scale is
+   *   interpreted to be equal to zero):
+   *   [-10^P + 1, 10^P - 1].
+   * Acceptable values for precision and scale if both are specified:
+   * * If type = "NUMERIC":
+   *   1 <= precision - scale <= 29 and 0 <= scale <= 9.
+   * * If type = "BIGNUMERIC":
+   *   1 <= precision - scale <= 38 and 0 <= scale <= 38.
+   * Acceptable values for precision if only precision is specified but not
+   * scale (and thus scale is interpreted to be equal to zero):
+   * * If type = "NUMERIC": 1 <= precision <= 29.
+   * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+   * If scale is specified but not precision, then it is invalid.
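+   *
+   * Worked example: a NUMERIC field with precision = 10 and scale = 2 admits
+   * values in [-10^8 + 10^-2, 10^8 - 10^-2], i.e. -99999999.99 through
+   * 99999999.99. A sketch (assuming the standard generated builder setters):
+   *   TableFieldSchema price =
+   *       TableFieldSchema.newBuilder()
+   *           .setName("price")
+   *           .setType(TableFieldSchema.Type.NUMERIC)
+   *           .setPrecision(10L)
+   *           .setScale(2L)
+   *           .build();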
+   * 
+ * + * int64 precision = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The precision. + */ + long getPrecision(); + + /** + * + * + *
+   * Optional. See documentation for precision.
+   * 
+ * + * int64 scale = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The scale. + */ + long getScale(); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableName.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableName.java new file mode 100644 index 0000000000..d4baf9c6b8 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableName.java @@ -0,0 +1,217 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class TableName implements ResourceName { + private static final PathTemplate PROJECT_DATASET_TABLE = + PathTemplate.createWithoutUrlEncoding("projects/{project}/datasets/{dataset}/tables/{table}"); + private volatile Map fieldValuesMap; + private final String project; + private final String dataset; + private final String table; + + @Deprecated + protected TableName() { + project = null; + dataset = null; + table = null; + } + + private TableName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + dataset = Preconditions.checkNotNull(builder.getDataset()); + table = Preconditions.checkNotNull(builder.getTable()); + } + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static TableName of(String project, String dataset, String table) { + return newBuilder().setProject(project).setDataset(dataset).setTable(table).build(); + } + + public static String format(String project, String dataset, String table) { + return newBuilder().setProject(project).setDataset(dataset).setTable(table).build().toString(); + } + + public static TableName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_DATASET_TABLE.validatedMatch( + formattedString, "TableName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("dataset"), matchMap.get("table")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new 
ArrayList<>(values.size()); + for (TableName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_DATASET_TABLE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (dataset != null) { + fieldMapBuilder.put("dataset", dataset); + } + if (table != null) { + fieldMapBuilder.put("table", table); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_DATASET_TABLE.instantiate( + "project", project, "dataset", dataset, "table", table); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null || getClass() == o.getClass()) { + TableName that = ((TableName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.dataset, that.dataset) + && Objects.equals(this.table, that.table); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(dataset); + h *= 1000003; + h ^= Objects.hashCode(table); + return h; + } + + /** Builder for projects/{project}/datasets/{dataset}/tables/{table}. */ + public static class Builder { + private String project; + private String dataset; + private String table; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setDataset(String dataset) { + this.dataset = dataset; + return this; + } + + public Builder setTable(String table) { + this.table = table; + return this; + } + + private Builder(TableName tableName) { + this.project = tableName.project; + this.dataset = tableName.dataset; + this.table = tableName.table; + } + + public TableName build() { + return new TableName(this); + } + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java new file mode 100644 index 0000000000..a84bff59c6 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java @@ -0,0 +1,106 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. 
DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/table.proto + +package com.google.cloud.bigquery.storage.v1; + +public final class TableProto { + private TableProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_TableSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_TableSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n,google/cloud/bigquery/storage/v1/table" + + ".proto\022 google.cloud.bigquery.storage.v1" + + "\032\037google/api/field_behavior.proto\"Q\n\013Tab" + + "leSchema\022B\n\006fields\030\001 \003(\01322.google.cloud." + + "bigquery.storage.v1.TableFieldSchema\"\205\005\n" + + "\020TableFieldSchema\022\021\n\004name\030\001 \001(\tB\003\340A\002\022J\n\004" + + "type\030\002 \001(\01627.google.cloud.bigquery.stora" + + "ge.v1.TableFieldSchema.TypeB\003\340A\002\022J\n\004mode" + + "\030\003 \001(\01627.google.cloud.bigquery.storage.v" + + "1.TableFieldSchema.ModeB\003\340A\001\022G\n\006fields\030\004" + + " \003(\01322.google.cloud.bigquery.storage.v1." 
+ + "TableFieldSchemaB\003\340A\001\022\030\n\013description\030\006 \001" + + "(\tB\003\340A\001\022\027\n\nmax_length\030\007 \001(\003B\003\340A\001\022\026\n\tprec" + + "ision\030\010 \001(\003B\003\340A\001\022\022\n\005scale\030\t \001(\003B\003\340A\001\"\325\001\n" + + "\004Type\022\024\n\020TYPE_UNSPECIFIED\020\000\022\n\n\006STRING\020\001\022" + + "\t\n\005INT64\020\002\022\n\n\006DOUBLE\020\003\022\n\n\006STRUCT\020\004\022\t\n\005BY" + + "TES\020\005\022\010\n\004BOOL\020\006\022\r\n\tTIMESTAMP\020\007\022\010\n\004DATE\020\010" + + "\022\010\n\004TIME\020\t\022\014\n\010DATETIME\020\n\022\r\n\tGEOGRAPHY\020\013\022" + + "\013\n\007NUMERIC\020\014\022\016\n\nBIGNUMERIC\020\r\022\014\n\010INTERVAL" + + "\020\016\022\010\n\004JSON\020\017\"F\n\004Mode\022\024\n\020MODE_UNSPECIFIED" + + "\020\000\022\014\n\010NULLABLE\020\001\022\014\n\010REQUIRED\020\002\022\014\n\010REPEAT" + + "ED\020\003B\303\001\n$com.google.cloud.bigquery.stora" + + "ge.v1B\nTableProtoP\001ZGgoogle.golang.org/g" + + "enproto/googleapis/cloud/bigquery/storag" + + "e/v1;storage\252\002 Google.Cloud.BigQuery.Sto" + + "rage.V1\312\002 Google\\Cloud\\BigQuery\\Storage\\" + + "V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1_TableSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1_TableSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_TableSchema_descriptor, + new java.lang.String[] { + "Fields", + }); + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_descriptor, + new java.lang.String[] { + "Name", "Type", "Mode", "Fields", "Description", "MaxLength", "Precision", "Scale", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java new file mode 100644 index 0000000000..9004ff1d98 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java @@ -0,0 +1,961 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/table.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Schema of a table.
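+ *
+ * For example, a minimal sketch composing a one-field schema (assuming the
+ * standard generated builder setters):
+ *   TableSchema schema =
+ *       TableSchema.newBuilder()
+ *           .addFields(
+ *               TableFieldSchema.newBuilder()
+ *                   .setName("id")
+ *                   .setType(TableFieldSchema.Type.INT64)
+ *                   .setMode(TableFieldSchema.Mode.REQUIRED))
+ *           .build();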
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.TableSchema} + */ +public final class TableSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.TableSchema) + TableSchemaOrBuilder { + private static final long serialVersionUID = 0L; + // Use TableSchema.newBuilder() to construct. + private TableSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableSchema() { + fields_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableSchema(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private TableSchema( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + fields_ = + new java.util.ArrayList< + com.google.cloud.bigquery.storage.v1.TableFieldSchema>(); + mutable_bitField0_ |= 0x00000001; + } + fields_.add( + input.readMessage( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.parser(), + extensionRegistry)); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.TableSchema.class, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder.class); + } + + public static final int FIELDS_FIELD_NUMBER = 1; + private java.util.List fields_; + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + @java.lang.Override + public java.util.List getFieldsList() { + return fields_; + } + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + @java.lang.Override + public java.util.List + getFieldsOrBuilderList() { + return fields_; + } + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + @java.lang.Override + public int getFieldsCount() { + return fields_.size(); + } + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema getFields(int index) { + return fields_.get(index); + } + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + return fields_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < fields_.size(); i++) { + output.writeMessage(1, fields_.get(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < fields_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, fields_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.TableSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.TableSchema other = + (com.google.cloud.bigquery.storage.v1.TableSchema) obj; + + if (!getFieldsList().equals(other.getFieldsList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getFieldsCount() > 0) { + hash = (37 * hash) + FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getFieldsList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.TableSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Schema of a table.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.TableSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.TableSchema) + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.TableSchema.class, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.TableSchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getFieldsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + fieldsBuilder_.clear(); + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchema build() { + com.google.cloud.bigquery.storage.v1.TableSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchema buildPartial() { + com.google.cloud.bigquery.storage.v1.TableSchema result = + new com.google.cloud.bigquery.storage.v1.TableSchema(this); + int from_bitField0_ = bitField0_; + if (fieldsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.fields_ = fields_; + } else { + result.fields_ = fieldsBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder 
setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.TableSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.TableSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.TableSchema other) { + if (other == com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance()) + return this; + if (fieldsBuilder_ == null) { + if (!other.fields_.isEmpty()) { + if (fields_.isEmpty()) { + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureFieldsIsMutable(); + fields_.addAll(other.fields_); + } + onChanged(); + } + } else { + if (!other.fields_.isEmpty()) { + if (fieldsBuilder_.isEmpty()) { + fieldsBuilder_.dispose(); + fieldsBuilder_ = null; + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000001); + fieldsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getFieldsFieldBuilder() + : null; + } else { + fieldsBuilder_.addAllMessages(other.fields_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.TableSchema parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.bigquery.storage.v1.TableSchema) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.util.List fields_ = + java.util.Collections.emptyList(); + + private void ensureFieldsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + fields_ = + new java.util.ArrayList(fields_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableFieldSchema, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder> + fieldsBuilder_; + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public java.util.List getFieldsList() { + if (fieldsBuilder_ == null) { + return java.util.Collections.unmodifiableList(fields_); + } else { + return fieldsBuilder_.getMessageList(); + } + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public int getFieldsCount() { + if (fieldsBuilder_ == null) { + return fields_.size(); + } else { + return fieldsBuilder_.getCount(); + } + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema getFields(int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessage(index); + } + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.set(index, value); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.set(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder addFields(com.google.cloud.bigquery.storage.v1.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(value); + onChanged(); + } else { + fieldsBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(index, value); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder addFields( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder addAllFields( + java.lang.Iterable + values) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_); + onChanged(); + } else { + fieldsBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder clearFields() { + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + fieldsBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder removeFields(int index) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.remove(index); + onChanged(); + } else { + fieldsBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder getFieldsBuilder( + int index) { + return getFieldsFieldBuilder().getBuilder(index); + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public java.util.List + getFieldsOrBuilderList() { + if (fieldsBuilder_ != null) { + return fieldsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(fields_); + } + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder addFieldsBuilder() { + return getFieldsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDefaultInstance()); + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder addFieldsBuilder( + int index) { + return getFieldsFieldBuilder() + .addBuilder( + index, com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDefaultInstance()); + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public java.util.List + getFieldsBuilderList() { + return getFieldsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableFieldSchema, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder> + getFieldsFieldBuilder() { + if (fieldsBuilder_ == null) { + fieldsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableFieldSchema, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder>( + fields_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + fields_ = null; + } + return fieldsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.TableSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.TableSchema) + private static final com.google.cloud.bigquery.storage.v1.TableSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.TableSchema(); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableSchema(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchemaOrBuilder.java new file mode 100644 index 0000000000..fff23ddb63 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchemaOrBuilder.java @@ -0,0 +1,77 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/table.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface TableSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.TableSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + java.util.List getFieldsList(); + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchema getFields(int index); + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + int getFieldsCount(); + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + java.util.List + getFieldsOrBuilderList(); + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder getFieldsOrBuilder(int index); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java new file mode 100644 index 0000000000..0992f2ddbb --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java @@ -0,0 +1,1965 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/stream.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Information about a single stream that is used to write data into the storage system.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.WriteStream} + */ +public final class WriteStream extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.WriteStream) + WriteStreamOrBuilder { + private static final long serialVersionUID = 0L; + // Use WriteStream.newBuilder() to construct. + private WriteStream(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private WriteStream() { + name_ = ""; + type_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new WriteStream(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private WriteStream( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 16: + { + int rawValue = input.readEnum(); + + type_ = rawValue; + break; + } + case 26: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (createTime_ != null) { + subBuilder = createTime_.toBuilder(); + } + createTime_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(createTime_); + createTime_ = subBuilder.buildPartial(); + } + + break; + } + case 34: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (commitTime_ != null) { + subBuilder = commitTime_.toBuilder(); + } + commitTime_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(commitTime_); + commitTime_ = subBuilder.buildPartial(); + } + + break; + } + case 42: + { + com.google.cloud.bigquery.storage.v1.TableSchema.Builder subBuilder = null; + if (tableSchema_ != null) { + subBuilder = tableSchema_.toBuilder(); + } + tableSchema_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1.TableSchema.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableSchema_); + tableSchema_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_WriteStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_WriteStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.WriteStream.class, + com.google.cloud.bigquery.storage.v1.WriteStream.Builder.class); + } + + /** + * + * + *
+   * Type enum of the stream.
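+   *
+   * For example, a hedged sketch of requesting a pending stream (assuming
+   * the standard generated builder setters):
+   *   WriteStream stream =
+   *       WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build();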
+   * 
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1.WriteStream.Type} + */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Unknown type.
+     * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
+     * Data will commit automatically and appear as soon as the write is
+     * acknowledged.
+     * 
+ * + * COMMITTED = 1; + */ + COMMITTED(1), + /** + * + * + *
+     * Data is invisible until the stream is committed.
+     * 
+ * + * PENDING = 2; + */ + PENDING(2), + /** + * + * + *
+     * Data is only visible up to the offset to which it was flushed.
+     * 
+ * + * BUFFERED = 3; + */ + BUFFERED(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Unknown type.
+     * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+     * Data will commit automatically and appear as soon as the write is
+     * acknowledged.
+     * 
+ * + * COMMITTED = 1; + */ + public static final int COMMITTED_VALUE = 1; + /** + * + * + *
+     * Data is invisible until the stream is committed.
+     * 
+ * + * PENDING = 2; + */ + public static final int PENDING_VALUE = 2; + /** + * + * + *
+     * Data is only visible up to the offset to which it was flushed.
+     * 
+ * + * BUFFERED = 3; + */ + public static final int BUFFERED_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return COMMITTED; + case 2: + return PENDING; + case 3: + return BUFFERED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.WriteStream.getDescriptor().getEnumTypes().get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.WriteStream.Type) + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + private int type_; + /** + * + * + *
+   * Immutable. Type of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + /** + * + * + *
+   * Immutable. Type of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The type. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream.Type getType() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1.WriteStream.Type result = + com.google.cloud.bigquery.storage.v1.WriteStream.Type.valueOf(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1.WriteStream.Type.UNRECOGNIZED + : result; + } + + public static final int CREATE_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp createTime_; + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is the
+   * creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return createTime_ != null; + } + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is the
+   * creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is the
+   * creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return getCreateTime(); + } + + public static final int COMMIT_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp commitTime_; + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, its commit_time is the same as its
+   * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+   * means the stream has not yet been committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the commitTime field is set. + */ + @java.lang.Override + public boolean hasCommitTime() { + return commitTime_ != null; + } + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, its commit_time is the same as its
+   * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+   * means the stream has not yet been committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The commitTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCommitTime() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, its commit_time is the same as its
+   * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+   * means the stream has not yet been committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + return getCommitTime(); + } + + public static final int TABLE_SCHEMA_FIELD_NUMBER = 5; + private com.google.cloud.bigquery.storage.v1.TableSchema tableSchema_; + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * the `CreateWriteStream` response. Callers should generate data that is
+   * compatible with this schema to send in the initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the tableSchema field is set. + */ + @java.lang.Override + public boolean hasTableSchema() { + return tableSchema_ != null; + } + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * the `CreateWriteStream` response. Callers should generate data that is
+   * compatible with this schema to send in the initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The tableSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchema getTableSchema() { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : tableSchema_; + } + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * the `CreateWriteStream` response. Callers should generate data that is
+   * compatible with this schema to send in the initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getTableSchemaOrBuilder() { + return getTableSchema(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1.WriteStream.Type.TYPE_UNSPECIFIED.getNumber()) { + output.writeEnum(2, type_); + } + if (createTime_ != null) { + output.writeMessage(3, getCreateTime()); + } + if (commitTime_ != null) { + output.writeMessage(4, getCommitTime()); + } + if (tableSchema_ != null) { + output.writeMessage(5, getTableSchema()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1.WriteStream.Type.TYPE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, type_); + } + if (createTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCreateTime()); + } + if (commitTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCommitTime()); + } + if (tableSchema_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getTableSchema()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.WriteStream)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.WriteStream other = + (com.google.cloud.bigquery.storage.v1.WriteStream) obj; + + if (!getName().equals(other.getName())) return false; + if (type_ != other.type_) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasCommitTime() != other.hasCommitTime()) return false; + if (hasCommitTime()) { + if (!getCommitTime().equals(other.getCommitTime())) return false; + } + if (hasTableSchema() != other.hasTableSchema()) return false; + if (hasTableSchema()) { + if (!getTableSchema().equals(other.getTableSchema())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + type_; + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + 
} + if (hasCommitTime()) { + hash = (37 * hash) + COMMIT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCommitTime().hashCode(); + } + if (hasTableSchema()) { + hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getTableSchema().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder 
newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.WriteStream prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Information about a single stream that gets data inside the storage system.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.WriteStream} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.WriteStream) + com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_WriteStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_WriteStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.WriteStream.class, + com.google.cloud.bigquery.storage.v1.WriteStream.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.WriteStream.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + type_ = 0; + + if (createTimeBuilder_ == null) { + createTime_ = null; + } else { + createTime_ = null; + createTimeBuilder_ = null; + } + if (commitTimeBuilder_ == null) { + commitTime_ = null; + } else { + commitTime_ = null; + commitTimeBuilder_ = null; + } + if (tableSchemaBuilder_ == null) { + tableSchema_ = null; + } else { + tableSchema_ = null; + tableSchemaBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_WriteStream_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.WriteStream.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream build() { + com.google.cloud.bigquery.storage.v1.WriteStream result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream buildPartial() { + com.google.cloud.bigquery.storage.v1.WriteStream result = + new com.google.cloud.bigquery.storage.v1.WriteStream(this); + result.name_ = name_; + result.type_ = type_; + if (createTimeBuilder_ == null) { + result.createTime_ = createTime_; + } else { + result.createTime_ = createTimeBuilder_.build(); + } + if (commitTimeBuilder_ == null) { + result.commitTime_ = commitTime_; + } else { + result.commitTime_ = commitTimeBuilder_.build(); + } + if (tableSchemaBuilder_ == null) { + result.tableSchema_ = tableSchema_; + } else { + result.tableSchema_ = tableSchemaBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) 
{ + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.WriteStream) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.WriteStream) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.WriteStream other) { + if (other == com.google.cloud.bigquery.storage.v1.WriteStream.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.type_ != 0) { + setTypeValue(other.getTypeValue()); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasCommitTime()) { + mergeCommitTime(other.getCommitTime()); + } + if (other.hasTableSchema()) { + mergeTableSchema(other.getTableSchema()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.WriteStream parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.bigquery.storage.v1.WriteStream) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private int type_ = 0; + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + + type_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The type. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream.Type getType() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1.WriteStream.Type result = + com.google.cloud.bigquery.storage.v1.WriteStream.Type.valueOf(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1.WriteStream.Type.UNRECOGNIZED + : result; + } + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(com.google.cloud.bigquery.storage.v1.WriteStream.Type value) { + if (value == null) { + throw new NullPointerException(); + } + + type_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return This builder for chaining. + */ + public Builder clearType() { + + type_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return createTimeBuilder_ != null || createTime_ != null; + } + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + onChanged(); + } else { + createTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + onChanged(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (createTime_ != null) { + createTime_ = + com.google.protobuf.Timestamp.newBuilder(createTime_).mergeFrom(value).buildPartial(); + } else { + createTime_ = value; + } + onChanged(); + } else { + createTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + if (createTimeBuilder_ == null) { + createTime_ = null; + onChanged(); + } else { + createTime_ = null; + createTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + + onChanged(); + return getCreateTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp commitTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimeBuilder_; + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, its commit_time is the same as its
+     * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means the stream has not yet been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the commitTime field is set. + */ + public boolean hasCommitTime() { + return commitTimeBuilder_ != null || commitTime_ != null; + } + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, its commit_time is the same as its
+     * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means the stream has not yet been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The commitTime. + */ + public com.google.protobuf.Timestamp getCommitTime() { + if (commitTimeBuilder_ == null) { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } else { + return commitTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, its commit_time is the same as its
+     * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means the stream has not yet been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTime_ = value; + onChanged(); + } else { + commitTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, its commit_time is the same as its
+     * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means the stream has not yet been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCommitTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimeBuilder_ == null) { + commitTime_ = builderForValue.build(); + onChanged(); + } else { + commitTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, its commit_time is the same as its
+     * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means the stream has not yet been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (commitTime_ != null) { + commitTime_ = + com.google.protobuf.Timestamp.newBuilder(commitTime_).mergeFrom(value).buildPartial(); + } else { + commitTime_ = value; + } + onChanged(); + } else { + commitTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, its commit_time is the same as its
+     * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means the stream has not yet been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCommitTime() { + if (commitTimeBuilder_ == null) { + commitTime_ = null; + onChanged(); + } else { + commitTime_ = null; + commitTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, its commit_time is the same as its
+     * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means the stream has not yet been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCommitTimeBuilder() { + + onChanged(); + return getCommitTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, its commit_time is the same as its
+     * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means the stream has not yet been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + if (commitTimeBuilder_ != null) { + return commitTimeBuilder_.getMessageOrBuilder(); + } else { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } + } + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, its commit_time is the same as its
+     * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means the stream has not yet been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCommitTimeFieldBuilder() { + if (commitTimeBuilder_ == null) { + commitTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTime(), getParentForChildren(), isClean()); + commitTime_ = null; + } + return commitTimeBuilder_; + } + + private com.google.cloud.bigquery.storage.v1.TableSchema tableSchema_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableSchema, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder> + tableSchemaBuilder_; + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. Callers should generate data that is
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the tableSchema field is set. + */ + public boolean hasTableSchema() { + return tableSchemaBuilder_ != null || tableSchema_ != null; + } + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. Callers should generate data that is
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The tableSchema. + */ + public com.google.cloud.bigquery.storage.v1.TableSchema getTableSchema() { + if (tableSchemaBuilder_ == null) { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : tableSchema_; + } else { + return tableSchemaBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. Callers should generate data that is
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setTableSchema(com.google.cloud.bigquery.storage.v1.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableSchema_ = value; + onChanged(); + } else { + tableSchemaBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. Callers should generate data that is
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setTableSchema( + com.google.cloud.bigquery.storage.v1.TableSchema.Builder builderForValue) { + if (tableSchemaBuilder_ == null) { + tableSchema_ = builderForValue.build(); + onChanged(); + } else { + tableSchemaBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. Callers should generate data that is
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeTableSchema(com.google.cloud.bigquery.storage.v1.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (tableSchema_ != null) { + tableSchema_ = + com.google.cloud.bigquery.storage.v1.TableSchema.newBuilder(tableSchema_) + .mergeFrom(value) + .buildPartial(); + } else { + tableSchema_ = value; + } + onChanged(); + } else { + tableSchemaBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. Callers should generate data that is
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearTableSchema() { + if (tableSchemaBuilder_ == null) { + tableSchema_ = null; + onChanged(); + } else { + tableSchema_ = null; + tableSchemaBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. Callers should generate data that is
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1.TableSchema.Builder getTableSchemaBuilder() { + + onChanged(); + return getTableSchemaFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. Callers should generate data that is
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getTableSchemaOrBuilder() { + if (tableSchemaBuilder_ != null) { + return tableSchemaBuilder_.getMessageOrBuilder(); + } else { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : tableSchema_; + } + } + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. Callers should generate data that is
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableSchema, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder> + getTableSchemaFieldBuilder() { + if (tableSchemaBuilder_ == null) { + tableSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableSchema, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder>( + getTableSchema(), getParentForChildren(), isClean()); + tableSchema_ = null; + } + return tableSchemaBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.WriteStream) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.WriteStream) + private static final com.google.cloud.bigquery.storage.v1.WriteStream DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.WriteStream(); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WriteStream parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new WriteStream(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamName.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamName.java new file mode 100644 index 0000000000..7e47640e05 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamName.java @@ -0,0 +1,257 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class WriteStreamName implements ResourceName { + private static final PathTemplate PROJECT_DATASET_TABLE_STREAM = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}"); + private volatile Map fieldValuesMap; + private final String project; + private final String dataset; + private final String table; + private final String stream; + + @Deprecated + protected WriteStreamName() { + project = null; + dataset = null; + table = null; + stream = null; + } + + private WriteStreamName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + dataset = Preconditions.checkNotNull(builder.getDataset()); + table = Preconditions.checkNotNull(builder.getTable()); + stream = Preconditions.checkNotNull(builder.getStream()); + } + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public String getStream() { + return stream; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static WriteStreamName of(String project, String dataset, String table, String stream) { + return newBuilder() + .setProject(project) + .setDataset(dataset) + .setTable(table) + .setStream(stream) + .build(); + } + + public static String format(String project, String dataset, String table, String stream) { + return newBuilder() + .setProject(project) + .setDataset(dataset) + .setTable(table) + .setStream(stream) + .build() + .toString(); + } + + public static WriteStreamName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_DATASET_TABLE_STREAM.validatedMatch( + formattedString, "WriteStreamName.parse: formattedString not in valid format"); + return of( + matchMap.get("project"), + matchMap.get("dataset"), + matchMap.get("table"), + matchMap.get("stream")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (WriteStreamName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_DATASET_TABLE_STREAM.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (dataset != null) { + fieldMapBuilder.put("dataset", dataset); + } + if (table != null) { + fieldMapBuilder.put("table", table); + } + if (stream != null) { + 
fieldMapBuilder.put("stream", stream); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_DATASET_TABLE_STREAM.instantiate( + "project", project, "dataset", dataset, "table", table, "stream", stream); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null || getClass() == o.getClass()) { + WriteStreamName that = ((WriteStreamName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.dataset, that.dataset) + && Objects.equals(this.table, that.table) + && Objects.equals(this.stream, that.stream); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(dataset); + h *= 1000003; + h ^= Objects.hashCode(table); + h *= 1000003; + h ^= Objects.hashCode(stream); + return h; + } + + /** Builder for projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}. */ + public static class Builder { + private String project; + private String dataset; + private String table; + private String stream; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public String getStream() { + return stream; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setDataset(String dataset) { + this.dataset = dataset; + return this; + } + + public Builder setTable(String table) { + this.table = table; + return this; + } + + public Builder setStream(String stream) { + this.stream = stream; + return this; + } + + private Builder(WriteStreamName writeStreamName) { + this.project = writeStreamName.project; + this.dataset = writeStreamName.dataset; + this.table = writeStreamName.table; + this.stream = writeStreamName.stream; + } + + public WriteStreamName build() { + return new WriteStreamName(this); + } + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java new file mode 100644 index 0000000000..c14850b8a1 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java @@ -0,0 +1,219 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/stream.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface WriteStreamOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.WriteStream) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Immutable. Type of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + /** + * + * + *
+   * Immutable. Type of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The type. + */ + com.google.cloud.bigquery.storage.v1.WriteStream.Type getType(); + + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is the
+   * creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is the
+   * creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is the
+   * creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, its commit_time is the same as its
+   * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+   * means the stream has not yet been committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the commitTime field is set. + */ + boolean hasCommitTime(); + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, its commit_time is the same as its
+   * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+   * means the stream has not yet been committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The commitTime. + */ + com.google.protobuf.Timestamp getCommitTime(); + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, its commit_time is the same as its
+   * `create_time`. If the stream is of `PENDING` type, an empty commit_time
+   * means the stream has not yet been committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder(); + + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * the `CreateWriteStream` response. Callers should generate data that is
+   * compatible with this schema to send in the initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the tableSchema field is set. + */ + boolean hasTableSchema(); + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * the `CreateWriteStream` response. Callers should generate data that is
+   * compatible with this schema to send in the initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The tableSchema. + */ + com.google.cloud.bigquery.storage.v1.TableSchema getTableSchema(); + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * the `CreateWriteStream` response. Callers should generate data that is
+   * compatible with this schema to send in the initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getTableSchemaOrBuilder(); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/protobuf.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/protobuf.proto new file mode 100644 index 0000000000..f987467dd9 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/protobuf.proto @@ -0,0 +1,48 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1; + +import "google/protobuf/descriptor.proto"; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage"; +option java_multiple_files = true; +option java_outer_classname = "ProtoBufProto"; +option java_package = "com.google.cloud.bigquery.storage.v1"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; + +// ProtoSchema describes the schema of the serialized protocol buffer data rows. +message ProtoSchema { + // Descriptor for input message. The provided descriptor must be self + // contained, such that data rows sent can be fully decoded using only the + // single descriptor. For data rows that are compositions of multiple + // independent messages, this means the descriptor may need to be transformed + // to only use nested types: + // https://developers.google.com/protocol-buffers/docs/proto#nested + // + // For additional information for how proto types and values map onto BigQuery + // see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions + google.protobuf.DescriptorProto proto_descriptor = 1; +} + +message ProtoRows { + // A sequence of rows serialized as a Protocol Buffer. + // + // See https://developers.google.com/protocol-buffers/docs/overview for more + // information on deserializing this field. 
+// ProtoRows contains the serialized row data to append.
+message ProtoRows {
+  // A sequence of rows serialized as a Protocol Buffer.
+  //
+  // See https://developers.google.com/protocol-buffers/docs/overview for more
+  // information on deserializing this field.
+  repeated bytes serialized_rows = 1;
+}
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto
index e86ad253cf..ab5a46cf18 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto
@@ -22,7 +22,12 @@ import "google/api/field_behavior.proto";
 import "google/api/resource.proto";
 import "google/cloud/bigquery/storage/v1/arrow.proto";
 import "google/cloud/bigquery/storage/v1/avro.proto";
+import "google/cloud/bigquery/storage/v1/protobuf.proto";
 import "google/cloud/bigquery/storage/v1/stream.proto";
+import "google/cloud/bigquery/storage/v1/table.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+import "google/rpc/status.proto";
 
 option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1";
 option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage";
@@ -105,6 +110,121 @@ service BigQueryRead {
   }
 }
 
+// BigQuery Write API.
+//
+// The Write API can be used to write data to BigQuery.
+//
+// For supplementary information about the Write API, see:
+// https://cloud.google.com/bigquery/docs/write-api
+service BigQueryWrite {
+  option (google.api.default_host) = "bigquerystorage.googleapis.com";
+  option (google.api.oauth_scopes) =
+      "https://www.googleapis.com/auth/bigquery,"
+      "https://www.googleapis.com/auth/bigquery.insertdata,"
+      "https://www.googleapis.com/auth/cloud-platform";
+
+  // Creates a write stream to the given table.
+  // Additionally, every table has a special stream named '_default'
+  // to which data can be written. This stream doesn't need to be created using
+  // CreateWriteStream. It is a stream that can be used simultaneously by any
+  // number of clients. Data written to this stream is considered committed as
+  // soon as an acknowledgement is received.
+  rpc CreateWriteStream(CreateWriteStreamRequest) returns (WriteStream) {
+    option (google.api.http) = {
+      post: "/v1/{parent=projects/*/datasets/*/tables/*}"
+      body: "write_stream"
+    };
+    option (google.api.method_signature) = "parent,write_stream";
+  }
+
+  // Appends data to the given stream.
+  //
+  // If `offset` is specified, the `offset` is checked against the end of
+  // stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+  // attempt is made to append to an offset beyond the current end of the
+  // stream, or `ALREADY_EXISTS` if the user provides an `offset` that has
+  // already been written to. The user can retry with an adjusted offset within
+  // the same RPC connection. If `offset` is not specified, the append happens
+  // at the end of the stream.
+  //
+  // The response contains an optional offset at which the append
+  // happened. No offset information will be returned for appends to a
+  // default stream.
+  //
+  // Responses are received in the same order in which requests are sent.
+  // There will be one response for each successfully inserted request.
+  // Responses may optionally embed error information if the originating
+  // `AppendRowsRequest` was not successfully processed.
+  //
+  // The specifics of when successfully appended data is made visible to the
+  // table are governed by the type of stream:
+  //
+  // * For COMMITTED streams (which includes the default stream), data is
+  // visible immediately upon successful append.
+  //
+  // * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
+  // rpc which advances a cursor to a newer offset in the stream.
+  //
+  // * For PENDING streams, data is not made visible until the stream itself is
+  // finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
+  // committed via the `BatchCommitWriteStreams` rpc.
+  rpc AppendRows(stream AppendRowsRequest) returns (stream AppendRowsResponse) {
+    option (google.api.http) = {
+      post: "/v1/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "write_stream";
+  }
+
+  // Gets information about a write stream.
+  rpc GetWriteStream(GetWriteStreamRequest) returns (WriteStream) {
+    option (google.api.http) = {
+      post: "/v1/{name=projects/*/datasets/*/tables/*/streams/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "name";
+  }
+
+  // Finalizes a write stream so that no new data can be appended to the
+  // stream. Finalize is not supported on the '_default' stream.
+  rpc FinalizeWriteStream(FinalizeWriteStreamRequest) returns (FinalizeWriteStreamResponse) {
+    option (google.api.http) = {
+      post: "/v1/{name=projects/*/datasets/*/tables/*/streams/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "name";
+  }
+
+  // Atomically commits a group of `PENDING` streams that belong to the same
+  // `parent` table.
+  //
+  // Streams must be finalized before commit and cannot be committed multiple
+  // times. Once a stream is committed, data in the stream becomes available
+  // for read operations.
+  rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest) returns (BatchCommitWriteStreamsResponse) {
+    option (google.api.http) = {
+      get: "/v1/{parent=projects/*/datasets/*/tables/*}"
+    };
+    option (google.api.method_signature) = "parent";
+  }
+
+  // Flushes rows to a BUFFERED stream.
+  //
+  // If users are appending rows to a BUFFERED stream, a flush operation is
+  // required in order for the rows to become available for reading. A flush
+  // operation advances the flushed offset of the stream, from any previously
+  // flushed offset, to the offset specified in the request.
+  //
+  // Flush is not supported on the _default stream, since it is not BUFFERED.
+  rpc FlushRows(FlushRowsRequest) returns (FlushRowsResponse) {
+    option (google.api.http) = {
+      post: "/v1/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "write_stream";
+  }
+}
+
 // Request message for `CreateReadSession`.
 message CreateReadSessionRequest {
   // Required. The request project that owns the session, in the form of
@@ -153,7 +273,7 @@ message ThrottleState {
   int32 throttle_percent = 1;
 }
 
-// Estimated stream statistics for a given Stream.
+// Estimated stream statistics for a given read Stream.
 message StreamStats {
   message Progress {
     // The fraction of rows assigned to the stream that have been processed by
@@ -246,3 +366,242 @@ message SplitReadStreamResponse {
   // value indicates that the original stream can no longer be split.
   ReadStream remainder_stream = 2;
 }
+
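Taken together, the write-stream RPCs above compose into the PENDING-stream lifecycle. A hedged sketch using the generated client — the flattened method names follow from the `method_signature` options above, project/dataset/table ids are placeholders, and the AppendRows step is elided (see the `AppendRowsRequest` example further below):

import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest;
import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse;
import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse;
import com.google.cloud.bigquery.storage.v1.TableName;
import com.google.cloud.bigquery.storage.v1.WriteStream;

public class PendingStreamLifecycle {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.of("my-project", "my_dataset", "my_table");
    try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
      // 1. Create a PENDING stream: appended rows stay invisible for now.
      WriteStream stream =
          client.createWriteStream(
              table, WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build());

      // 2. Append rows over the AppendRows bidi stream (elided here).

      // 3. Finalize: the stream stops accepting new appends.
      FinalizeWriteStreamResponse finalized = client.finalizeWriteStream(stream.getName());
      System.out.println("rows in finalized stream: " + finalized.getRowCount());

      // 4. Commit the finalized stream(s) atomically into the table.
      BatchCommitWriteStreamsResponse commit =
          client.batchCommitWriteStreams(
              BatchCommitWriteStreamsRequest.newBuilder()
                  .setParent(table.toString())
                  .addWriteStreams(stream.getName())
                  .build());
      // An unset commit_time means the commit failed; inspect stream_errors.
      if (!commit.hasCommitTime()) {
        commit.getStreamErrorsList().forEach(e -> System.err.println(e.getErrorMessage()));
      }
    }
  }
}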
+// Request message for `CreateWriteStream`.
+message CreateWriteStreamRequest {
+  // Required. Reference to the table to which the stream belongs, in the
+  // format of `projects/{project}/datasets/{dataset}/tables/{table}`.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquery.googleapis.com/Table"
+    }
+  ];
+
+  // Required. Stream to be created.
+  WriteStream write_stream = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request message for `AppendRows`.
+//
+// Because AppendRows is a bidirectional streaming RPC, certain parts of the
+// AppendRowsRequest need only be specified for the first request sent each
+// time the gRPC network connection is opened/reopened.
+message AppendRowsRequest {
+  // ProtoData contains the data rows and schema when constructing append
+  // requests.
+  message ProtoData {
+    // Proto schema used to serialize the data. This value only needs to be
+    // provided as part of the first request on a gRPC network connection,
+    // and will be ignored for subsequent requests on the connection.
+    ProtoSchema writer_schema = 1;
+
+    // Serialized row data in protobuf message format.
+    // Currently, the backend expects the serialized rows to adhere to
+    // proto2 semantics when appending rows, particularly with respect to
+    // how default values are encoded.
+    ProtoRows rows = 2;
+  }
+
+  // Required. The write_stream identifies the target of the append operation,
+  // and only needs to be specified as part of the first request on the gRPC
+  // connection. If provided for subsequent requests, it must match the value
+  // of the first request.
+  //
+  // For explicitly created write streams, the format is:
+  // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+  //
+  // For the special default stream, the format is:
+  // `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+  string write_stream = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/WriteStream"
+    }
+  ];
+
+  // If present, the write is only performed if the next append offset is the
+  // same as the provided value. If not present, the write is performed at the
+  // current end of stream. Specifying a value for this field is not allowed
+  // when calling AppendRows for the '_default' stream.
+  google.protobuf.Int64Value offset = 2;
+
+  // Input rows. The `writer_schema` field must be specified in the initial
+  // request and is currently ignored if specified in subsequent requests.
+  // Subsequent requests must have data in the same format as the initial
+  // request.
+  oneof rows {
+    // Rows in proto format.
+    ProtoData proto_rows = 4;
+  }
+
+  // ID set by the client to annotate its identity. Only the setting on the
+  // initial request is respected.
+  string trace_id = 6;
+}
+
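To make the first-request semantics concrete, a hedged sketch of assembling the initial `AppendRowsRequest` from these messages. `row` stands in for any generated proto2 row message with a self-contained descriptor; the request would then be sent on the bidi stream obtained from the client's `appendRowsCallable()`:

import com.google.cloud.bigquery.storage.v1.AppendRowsRequest;
import com.google.cloud.bigquery.storage.v1.ProtoRows;
import com.google.cloud.bigquery.storage.v1.ProtoSchema;
import com.google.protobuf.Int64Value;
import com.google.protobuf.Message;

public class FirstAppendRequest {
  // Builds the first request on a new AppendRows connection: it must carry the
  // target write_stream and writer_schema; later requests may carry rows only.
  static AppendRowsRequest firstRequest(String streamName, Message row) {
    ProtoSchema writerSchema =
        ProtoSchema.newBuilder()
            // Assumes the row message's descriptor references no external types.
            .setProtoDescriptor(row.getDescriptorForType().toProto())
            .build();
    return AppendRowsRequest.newBuilder()
        .setWriteStream(streamName)
        .setOffset(Int64Value.of(0)) // omit entirely for the '_default' stream
        .setProtoRows(
            AppendRowsRequest.ProtoData.newBuilder()
                .setWriterSchema(writerSchema)
                .setRows(ProtoRows.newBuilder().addSerializedRows(row.toByteString())))
        .build();
  }
}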
+// Response message for `AppendRows`.
+message AppendRowsResponse {
+  // AppendResult is returned for successful append requests.
+  message AppendResult {
+    // The row offset at which the last append occurred. The offset will not be
+    // set if appending using default streams.
+    google.protobuf.Int64Value offset = 1;
+  }
+
+  oneof response {
+    // Result if the append is successful.
+    AppendResult append_result = 1;
+
+    // Error returned when problems were encountered. If present,
+    // it indicates rows were not accepted into the system.
+    // Users can retry or continue with other append requests within the
+    // same connection.
+    //
+    // Additional information about error signalling:
+    //
+    // ALREADY_EXISTS: Happens when an append specified an offset, and the
+    // backend has already received data at this offset. Typically encountered
+    // in retry scenarios, and can be ignored.
+    //
+    // OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+    // the current end of the stream.
+    //
+    // INVALID_ARGUMENT: Indicates a malformed request or data.
+    //
+    // ABORTED: Request processing is aborted because of prior failures. The
+    // request can be retried if the previous failure is addressed.
+    //
+    // INTERNAL: Indicates server-side error(s) that can be retried.
+    google.rpc.Status error = 2;
+  }
+
+  // If the backend detects a schema update, it is passed on to the user so
+  // that the user can begin sending messages of the new type. This field is
+  // empty when no schema updates have occurred.
+  TableSchema updated_schema = 3;
+}
+
+// Request message for `GetWriteStream`.
+message GetWriteStreamRequest {
+  // Required. Name of the stream to get, in the form of
+  // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/WriteStream"
+    }
+  ];
+}
+
+// Request message for `BatchCommitWriteStreams`.
+message BatchCommitWriteStreamsRequest {
+  // Required. Parent table that all the streams should belong to, in the form
+  // of `projects/{project}/datasets/{dataset}/tables/{table}`.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED
+  ];
+
+  // Required. The group of streams that will be committed atomically.
+  repeated string write_streams = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Response message for `BatchCommitWriteStreams`.
+message BatchCommitWriteStreamsResponse {
+  // The time at which streams were committed, with microsecond granularity.
+  // This field will only exist when there are no stream errors.
+  // **Note:** if this field is not set, the commit was not successful.
+  google.protobuf.Timestamp commit_time = 1;
+
+  // Stream-level error if the commit failed. Only streams with an error will
+  // be in the list.
+  // If empty, there is no error and all streams are committed successfully.
+  // If non-empty, certain streams have errors and zero streams are committed,
+  // due to the atomicity guarantee.
+  repeated StorageError stream_errors = 2;
+}
+
+// Request message for `FinalizeWriteStream`.
+message FinalizeWriteStreamRequest {
+  // Required. Name of the stream to finalize, in the form of
+  // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/WriteStream"
+    }
+  ];
+}
+
+// Response message for `FinalizeWriteStream`.
+message FinalizeWriteStreamResponse {
+  // Number of rows in the finalized stream.
+  int64 row_count = 1;
+}
+
+// Request message for `FlushRows`.
+message FlushRowsRequest {
+  // Required. The stream that is the target of the flush operation.
+  string write_stream = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/WriteStream"
+    }
+  ];
+
+  // Ending offset of the flush operation. Rows before this offset (including
+  // this offset) will be flushed.
+  google.protobuf.Int64Value offset = 2;
+}
+
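For BUFFERED streams, a hedged sketch of the corresponding flush call; the stream name and offset are placeholders, and `flushRows` is the generated request-object method:

import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
import com.google.cloud.bigquery.storage.v1.FlushRowsRequest;
import com.google.cloud.bigquery.storage.v1.FlushRowsResponse;
import com.google.protobuf.Int64Value;

public class FlushSketch {
  // Makes rows up to and including `offset` readable in a BUFFERED stream.
  static long flushUpTo(BigQueryWriteClient client, String streamName, long offset) {
    FlushRowsResponse response =
        client.flushRows(
            FlushRowsRequest.newBuilder()
                .setWriteStream(streamName)
                .setOffset(Int64Value.of(offset))
                .build());
    // Rows at or before the returned offset are now available for reading.
    return response.getOffset();
  }
}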
+// Response message for `FlushRows`.
+message FlushRowsResponse {
+  // The rows before this offset (including this offset) are flushed.
+  int64 offset = 1;
+}
+
+// Structured custom BigQuery Storage error message. The error can be attached
+// as error details in the returned rpc Status. In particular, the use of error
+// codes allows more structured error handling, and reduces the need to evaluate
+// unstructured error text strings.
+message StorageError {
+  // Error code for `StorageError`.
+  enum StorageErrorCode {
+    // Default error.
+    STORAGE_ERROR_CODE_UNSPECIFIED = 0;
+
+    // Table is not found in the system.
+    TABLE_NOT_FOUND = 1;
+
+    // Stream is already committed.
+    STREAM_ALREADY_COMMITTED = 2;
+
+    // Stream is not found.
+    STREAM_NOT_FOUND = 3;
+
+    // Invalid stream type.
+    // For example, attempting to commit a stream that is not PENDING.
+    INVALID_STREAM_TYPE = 4;
+
+    // Invalid stream state.
+    // For example, attempting to commit a stream that is not finalized or has
+    // been garbage collected.
+    INVALID_STREAM_STATE = 5;
+
+    // Stream is finalized.
+    STREAM_FINALIZED = 6;
+
+    // Schema mismatch caused by the user schema containing fields that do not
+    // exist in the BigQuery schema.
+    SCHEMA_MISMATCH_EXTRA_FIELDS = 7;
+  }
+
+  // BigQuery Storage specific error code.
+  StorageErrorCode code = 1;
+
+  // Name of the failed entity.
+  string entity = 2;
+
+  // Message that describes the error.
+  string error_message = 3;
+}
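Because `StorageError` travels as a detail inside the returned `google.rpc.Status`, callers can recover the structured code rather than parsing message text. A minimal sketch, assuming the failure surfaces as a gRPC `StatusRuntimeException` and that `grpc-protobuf` is on the classpath:

import com.google.cloud.bigquery.storage.v1.StorageError;
import com.google.protobuf.Any;
import com.google.protobuf.InvalidProtocolBufferException;
import io.grpc.StatusRuntimeException;
import io.grpc.protobuf.StatusProto;

public class StorageErrorSketch {
  // Extracts the first StorageError detail from a failed RPC, if present.
  static StorageError storageErrorFrom(StatusRuntimeException e)
      throws InvalidProtocolBufferException {
    com.google.rpc.Status status = StatusProto.fromThrowable(e);
    if (status != null) {
      for (Any detail : status.getDetailsList()) {
        if (detail.is(StorageError.class)) {
          return detail.unpack(StorageError.class);
        }
      }
    }
    return null; // no structured detail attached
  }
}

The extracted `getCode()` value can then drive retry or surfacing decisions instead of string matching.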
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto
index 51aeaac465..dc62e8c00c 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto
@@ -20,6 +20,7 @@ import "google/api/field_behavior.proto";
 import "google/api/resource.proto";
 import "google/cloud/bigquery/storage/v1/arrow.proto";
 import "google/cloud/bigquery/storage/v1/avro.proto";
+import "google/cloud/bigquery/storage/v1/table.proto";
 import "google/protobuf/timestamp.proto";
 
 option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1";
@@ -146,3 +147,50 @@ message ReadStream {
   // `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
   string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
 }
+
+// Information about a single stream that receives data into the storage system.
+message WriteStream {
+  option (google.api.resource) = {
+    type: "bigquerystorage.googleapis.com/WriteStream"
+    pattern: "projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}"
+  };
+
+  // Type enum of the stream.
+  enum Type {
+    // Unknown type.
+    TYPE_UNSPECIFIED = 0;
+
+    // Data will commit automatically and appear as soon as the write is
+    // acknowledged.
+    COMMITTED = 1;
+
+    // Data is invisible until the stream is committed.
+    PENDING = 2;
+
+    // Data is only visible up to the offset to which it was flushed.
+    BUFFERED = 3;
+  }
+
+  // Output only. Name of the stream, in the form
+  // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Immutable. Type of the stream.
+  Type type = 2 [(google.api.field_behavior) = IMMUTABLE];
+
+  // Output only. Create time of the stream. For the _default stream, this is
+  // the creation_time of the table.
+  google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Commit time of the stream.
+  // If a stream is of `COMMITTED` type, then it will have a commit_time the
+  // same as `create_time`. If the stream is of `PENDING` type, an empty
+  // commit_time means it is not committed.
+  google.protobuf.Timestamp commit_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The schema of the destination table. It is only returned in
+  // the `CreateWriteStream` response. The caller should generate data that is
+  // compatible with this schema to send in the initial `AppendRowsRequest`.
+  // The table schema could go out of date during the lifetime of the stream.
+  TableSchema table_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
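A hedged sketch of reading this metadata back via `GetWriteStream`; the accessor names follow directly from the fields above, and the stream name is a placeholder:

import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
import com.google.cloud.bigquery.storage.v1.WriteStream;
import com.google.cloud.bigquery.storage.v1.WriteStreamName;

public class DescribeStream {
  // Looks up a stream's metadata; useful for checking type and commit state.
  static void describe(BigQueryWriteClient client, WriteStreamName name) {
    WriteStream stream = client.getWriteStream(name.toString());
    System.out.println("type: " + stream.getType());
    // For PENDING streams, an unset commit_time means not yet committed.
    if (stream.hasCommitTime()) {
      System.out.println("committed at: " + stream.getCommitTime());
    }
  }
}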
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto
new file mode 100644
index 0000000000..a8c6f844df
--- /dev/null
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto
@@ -0,0 +1,164 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1;
+
+import "google/api/field_behavior.proto";
+
+option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1";
+option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage";
+option java_multiple_files = true;
+option java_outer_classname = "TableProto";
+option java_package = "com.google.cloud.bigquery.storage.v1";
+option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1";
+
+// Schema of a table.
+message TableSchema {
+  // Describes the fields in a table.
+  repeated TableFieldSchema fields = 1;
+}
+
+// TableFieldSchema defines a single field/column within a table schema.
+message TableFieldSchema {
+  enum Type {
+    // Illegal value.
+    TYPE_UNSPECIFIED = 0;
+
+    // 64K, UTF-8 encoded.
+    STRING = 1;
+
+    // 64-bit signed.
+    INT64 = 2;
+
+    // 64-bit IEEE floating point.
+    DOUBLE = 3;
+
+    // Aggregate type.
+    STRUCT = 4;
+
+    // 64K, binary.
+    BYTES = 5;
+
+    // 2-valued.
+    BOOL = 6;
+
+    // 64-bit signed usec since UTC epoch.
+    TIMESTAMP = 7;
+
+    // Civil date - year, month, day.
+    DATE = 8;
+
+    // Civil time - hour, minute, second, microseconds.
+    TIME = 9;
+
+    // Combination of civil date and civil time.
+    DATETIME = 10;
+
+    // Geography object.
+    GEOGRAPHY = 11;
+
+    // Numeric value.
+    NUMERIC = 12;
+
+    // BigNumeric value.
+    BIGNUMERIC = 13;
+
+    // Interval.
+    INTERVAL = 14;
+
+    // JSON, string.
+    JSON = 15;
+  }
+
+  enum Mode {
+    // Illegal value.
+    MODE_UNSPECIFIED = 0;
+
+    NULLABLE = 1;
+
+    REQUIRED = 2;
+
+    REPEATED = 3;
+  }
+
+  // Required. The field name. The name must contain only letters (a-z, A-Z),
+  // numbers (0-9), or underscores (_), and must start with a letter or
+  // underscore. The maximum length is 128 characters.
+  string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The field data type.
+  Type type = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The field mode. The default value is NULLABLE.
+  Mode mode = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Describes the nested schema fields if the type property is set
+  // to STRUCT.
+  repeated TableFieldSchema fields = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The field description. The maximum length is 1,024 characters.
+  string description = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Maximum length of values of this field for STRING or BYTES.
+  //
+  // If max_length is not specified, no maximum length constraint is imposed
+  // on this field.
+  //
+  // If type = "STRING", then max_length represents the maximum UTF-8
+  // length of strings in this field.
+  //
+  // If type = "BYTES", then max_length represents the maximum number of
+  // bytes in this field.
+  //
+  // It is invalid to set this field if type is not "STRING" or "BYTES".
+  int64 max_length = 7 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Precision (maximum number of total digits in base 10) and scale
+  // (maximum number of digits in the fractional part in base 10) constraints
+  // for values of this field for NUMERIC or BIGNUMERIC.
+  //
+  // It is invalid to set precision or scale if type is not "NUMERIC" or
+  // "BIGNUMERIC".
+  //
+  // If precision and scale are not specified, no value range constraint is
+  // imposed on this field insofar as values are permitted by the type.
+  //
+  // Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+  //
+  // * Precision (P) and scale (S) are specified:
+  //   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+  // * Precision (P) is specified but not scale (and thus scale is
+  //   interpreted to be equal to zero):
+  //   [-10^P + 1, 10^P - 1].
+  //
+  // Acceptable values for precision and scale if both are specified:
+  //
+  // * If type = "NUMERIC":
+  //   1 <= precision - scale <= 29 and 0 <= scale <= 9.
+  // * If type = "BIGNUMERIC":
+  //   1 <= precision - scale <= 38 and 0 <= scale <= 38.
+  //
+  // Acceptable values for precision if only precision is specified but not
+  // scale (and thus scale is interpreted to be equal to zero):
+  //
+  // * If type = "NUMERIC": 1 <= precision <= 29.
+  // * If type = "BIGNUMERIC": 1 <= precision <= 38.
+  //
+  // If scale is specified but not precision, then it is invalid.
+  int64 precision = 8 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. See documentation for precision.
+  int64 scale = 9 [(google.api.field_behavior) = OPTIONAL];
+}
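Although `table_schema` is output-only in this API (it is returned on `CreateWriteStream`, not supplied by callers), seeing one assembled clarifies how `Type`, `Mode`, and nesting interact. A hedged sketch with placeholder field names:

import com.google.cloud.bigquery.storage.v1.TableFieldSchema;
import com.google.cloud.bigquery.storage.v1.TableSchema;

public class SchemaShape {
  static TableSchema exampleSchema() {
    // A required INT64 key plus a repeated STRUCT of string key/value tags.
    return TableSchema.newBuilder()
        .addFields(
            TableFieldSchema.newBuilder()
                .setName("id")
                .setType(TableFieldSchema.Type.INT64)
                .setMode(TableFieldSchema.Mode.REQUIRED))
        .addFields(
            TableFieldSchema.newBuilder()
                .setName("tags")
                .setType(TableFieldSchema.Type.STRUCT)
                .setMode(TableFieldSchema.Mode.REPEATED)
                .addFields(
                    TableFieldSchema.newBuilder()
                        .setName("key")
                        .setType(TableFieldSchema.Type.STRING)
                        .setMaxLength(64)) // max_length applies only to STRING/BYTES
                .addFields(
                    TableFieldSchema.newBuilder()
                        .setName("value")
                        .setType(TableFieldSchema.Type.STRING)))
        .build();
  }
}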