diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java
index 3606e6f57b..edeb6e2800 100644
--- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java
+++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java
@@ -26,6 +26,8 @@
import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream;
import com.google.cloud.bigquery.storage.v1alpha2.stub.BigQueryWriteStub;
@@ -591,6 +593,108 @@ public final BatchCommitWriteStreamsResponse batchCommitWriteStreams(
return stub.batchCommitWriteStreamsCallable();
}
+ // AUTO-GENERATED DOCUMENTATION AND METHOD
+ /**
+ * Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush
+ * operation is required in order for the rows to become available for reading. A Flush operation
+ * flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in
+ * the request.
+ *
+ *
+ * <p>Sample code:
+ *
+ * <pre><code>
+ * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+ * WriteStreamName writeStream = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+ * FlushRowsResponse response = bigQueryWriteClient.flushRows(writeStream);
+ * }
+ * </code></pre>
+ *
+ * @param writeStream Required. The stream that is the target of the flush operation.
+ * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+ */
+ public final FlushRowsResponse flushRows(WriteStreamName writeStream) {
+ FlushRowsRequest request =
+ FlushRowsRequest.newBuilder()
+ .setWriteStream(writeStream == null ? null : writeStream.toString())
+ .build();
+ return flushRows(request);
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD
+ /**
+ * Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush
+ * operation is required in order for the rows to become available for reading. A Flush operation
+ * flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in
+ * the request.
+ *
+ * <p>Sample code:
+ *
+ * <pre><code>
+ * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+ * WriteStreamName writeStream = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+ * FlushRowsResponse response = bigQueryWriteClient.flushRows(writeStream.toString());
+ * }
+ * </code></pre>
+ *
+ * @param writeStream Required. The stream that is the target of the flush operation.
+ * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+ */
+ public final FlushRowsResponse flushRows(String writeStream) {
+ FlushRowsRequest request = FlushRowsRequest.newBuilder().setWriteStream(writeStream).build();
+ return flushRows(request);
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD
+ /**
+ * Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush
+ * operation is required in order for the rows to become available for reading. A Flush operation
+ * flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in
+ * the request.
+ *
+ * <p>Sample code:
+ *
+ * <pre><code>
+ * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+ * WriteStreamName writeStream = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+ * FlushRowsRequest request = FlushRowsRequest.newBuilder()
+ * .setWriteStream(writeStream.toString())
+ * .build();
+ * FlushRowsResponse response = bigQueryWriteClient.flushRows(request);
+ * }
+ * </code></pre>
+ *
+ * @param request The request object containing all of the parameters for the API call.
+ * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+ */
+ public final FlushRowsResponse flushRows(FlushRowsRequest request) {
+ return flushRowsCallable().call(request);
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD
+ /**
+ * Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush
+ * operation is required in order for the rows to become available for reading. A Flush operation
+ * flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in
+ * the request.
+ *
+ * <p>Sample code:
+ *
+ * <pre><code>
+ * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+ * WriteStreamName writeStream = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+ * FlushRowsRequest request = FlushRowsRequest.newBuilder()
+ * .setWriteStream(writeStream.toString())
+ * .build();
+ * ApiFuture<FlushRowsResponse> future = bigQueryWriteClient.flushRowsCallable().futureCall(request);
+ * // Do something
+ * FlushRowsResponse response = future.get();
+ * }
+ * </code></pre>
+ */
+ public final UnaryCallable<FlushRowsRequest, FlushRowsResponse> flushRowsCallable() {
+ return stub.flushRowsCallable();
+ }
+
@Override
public final void close() {
stub.close();
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteSettings.java
index d7e3ff0522..a029c17d0e 100644
--- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteSettings.java
+++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteSettings.java
@@ -33,6 +33,8 @@
import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream;
import com.google.cloud.bigquery.storage.v1alpha2.stub.BigQueryWriteStubSettings;
@@ -102,6 +104,11 @@ public UnaryCallSettings getWriteStreamSetti
return ((BigQueryWriteStubSettings) getStubSettings()).batchCommitWriteStreamsSettings();
}
+ /** Returns the object with the settings used for calls to flushRows. */
+ public UnaryCallSettings<FlushRowsRequest, FlushRowsResponse> flushRowsSettings() {
+ return ((BigQueryWriteStubSettings) getStubSettings()).flushRowsSettings();
+ }
+
public static final BigQueryWriteSettings create(BigQueryWriteStubSettings stub)
throws IOException {
return new BigQueryWriteSettings.Builder(stub.toBuilder()).build();
@@ -229,6 +236,11 @@ public UnaryCallSettings.Builder getWriteStr
return getStubSettingsBuilder().batchCommitWriteStreamsSettings();
}
+ /** Returns the builder for the settings used for calls to flushRows. */
+ public UnaryCallSettings.Builder<FlushRowsRequest, FlushRowsResponse> flushRowsSettings() {
+ return getStubSettingsBuilder().flushRowsSettings();
+ }
+
@Override
public BigQueryWriteSettings build() throws IOException {
return new BigQueryWriteSettings(this);
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub.java
index 7f319a47ce..c86dcd8a28 100644
--- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub.java
+++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub.java
@@ -26,6 +26,8 @@
import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream;
import javax.annotation.Generated;
@@ -62,6 +64,10 @@ public UnaryCallable getWriteStreamCallable(
throw new UnsupportedOperationException("Not implemented: batchCommitWriteStreamsCallable()");
}
+ public UnaryCallable<FlushRowsRequest, FlushRowsResponse> flushRowsCallable() {
+ throw new UnsupportedOperationException("Not implemented: flushRowsCallable()");
+ }
+
@Override
public abstract void close();
}
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings.java
index 7dba9c85fa..161fe0ed2d 100644
--- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings.java
+++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings.java
@@ -38,6 +38,8 @@
import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream;
import com.google.common.collect.ImmutableList;
@@ -99,6 +101,7 @@ public class BigQueryWriteStubSettings extends StubSettings
batchCommitWriteStreamsSettings;
+ private final UnaryCallSettings<FlushRowsRequest, FlushRowsResponse> flushRowsSettings;
/** Returns the object with the settings used for calls to createWriteStream. */
public UnaryCallSettings createWriteStreamSettings() {
@@ -127,6 +130,11 @@ public UnaryCallSettings getWriteStreamSetti
return batchCommitWriteStreamsSettings;
}
+ /** Returns the object with the settings used for calls to flushRows. */
+ public UnaryCallSettings<FlushRowsRequest, FlushRowsResponse> flushRowsSettings() {
+ return flushRowsSettings;
+ }
+
@BetaApi("A restructuring of stub classes is planned, so this may break in the future")
public BigQueryWriteStub createStub() throws IOException {
if (getTransportChannelProvider()
@@ -201,6 +209,7 @@ protected BigQueryWriteStubSettings(Builder settingsBuilder) throws IOException
getWriteStreamSettings = settingsBuilder.getWriteStreamSettings().build();
finalizeWriteStreamSettings = settingsBuilder.finalizeWriteStreamSettings().build();
batchCommitWriteStreamsSettings = settingsBuilder.batchCommitWriteStreamsSettings().build();
+ flushRowsSettings = settingsBuilder.flushRowsSettings().build();
}
/** Builder for BigQueryWriteStubSettings. */
@@ -218,6 +227,7 @@ public static class Builder extends StubSettings.Builder
batchCommitWriteStreamsSettings;
+ private final UnaryCallSettings.Builder<FlushRowsRequest, FlushRowsResponse> flushRowsSettings;
private static final ImmutableMap>
RETRYABLE_CODE_DEFINITIONS;
@@ -270,12 +280,15 @@ protected Builder(ClientContext clientContext) {
batchCommitWriteStreamsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder();
+ flushRowsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder();
+
unaryMethodSettingsBuilders =
ImmutableList.>of(
createWriteStreamSettings,
getWriteStreamSettings,
finalizeWriteStreamSettings,
- batchCommitWriteStreamsSettings);
+ batchCommitWriteStreamsSettings,
+ flushRowsSettings);
initDefaults(this);
}
@@ -311,6 +324,11 @@ private static Builder initDefaults(Builder builder) {
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default"));
+ builder
+ .flushRowsSettings()
+ .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent"))
+ .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default"));
+
return builder;
}
@@ -322,13 +340,15 @@ protected Builder(BigQueryWriteStubSettings settings) {
getWriteStreamSettings = settings.getWriteStreamSettings.toBuilder();
finalizeWriteStreamSettings = settings.finalizeWriteStreamSettings.toBuilder();
batchCommitWriteStreamsSettings = settings.batchCommitWriteStreamsSettings.toBuilder();
+ flushRowsSettings = settings.flushRowsSettings.toBuilder();
unaryMethodSettingsBuilders =
ImmutableList.>of(
createWriteStreamSettings,
getWriteStreamSettings,
finalizeWriteStreamSettings,
- batchCommitWriteStreamsSettings);
+ batchCommitWriteStreamsSettings,
+ flushRowsSettings);
}
// NEXT_MAJOR_VER: remove 'throws Exception'
@@ -377,6 +397,11 @@ public UnaryCallSettings.Builder getWriteStr
return batchCommitWriteStreamsSettings;
}
+ /** Returns the builder for the settings used for calls to flushRows. */
+ public UnaryCallSettings.Builder<FlushRowsRequest, FlushRowsResponse> flushRowsSettings() {
+ return flushRowsSettings;
+ }
+
@Override
public BigQueryWriteStubSettings build() throws IOException {
return new BigQueryWriteStubSettings(this);
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub.java
index 3d1f96498a..7729ba6f9b 100644
--- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub.java
+++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub.java
@@ -31,6 +31,8 @@
import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream;
import com.google.common.collect.ImmutableMap;
@@ -103,6 +105,14 @@ public class GrpcBigQueryWriteStub extends BigQueryWriteStub {
.setResponseMarshaller(
ProtoUtils.marshaller(BatchCommitWriteStreamsResponse.getDefaultInstance()))
.build();
+ private static final MethodDescriptor<FlushRowsRequest, FlushRowsResponse>
+ flushRowsMethodDescriptor =
+ MethodDescriptor.<FlushRowsRequest, FlushRowsResponse>newBuilder()
+ .setType(MethodDescriptor.MethodType.UNARY)
+ .setFullMethodName("google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/FlushRows")
+ .setRequestMarshaller(ProtoUtils.marshaller(FlushRowsRequest.getDefaultInstance()))
+ .setResponseMarshaller(ProtoUtils.marshaller(FlushRowsResponse.getDefaultInstance()))
+ .build();
private final BackgroundResource backgroundResources;
@@ -113,6 +123,7 @@ public class GrpcBigQueryWriteStub extends BigQueryWriteStub {
finalizeWriteStreamCallable;
private final UnaryCallable
batchCommitWriteStreamsCallable;
+ private final UnaryCallable<FlushRowsRequest, FlushRowsResponse> flushRowsCallable;
private final GrpcStubCallableFactory callableFactory;
@@ -212,6 +223,19 @@ public Map extract(BatchCommitWriteStreamsRequest request) {
}
})
.build();
+ GrpcCallSettings<FlushRowsRequest, FlushRowsResponse> flushRowsTransportSettings =
+ GrpcCallSettings.<FlushRowsRequest, FlushRowsResponse>newBuilder()
+ .setMethodDescriptor(flushRowsMethodDescriptor)
+ .setParamsExtractor(
+ new RequestParamsExtractor<FlushRowsRequest>() {
+ @Override
+ public Map<String, String> extract(FlushRowsRequest request) {
+ ImmutableMap.Builder<String, String> params = ImmutableMap.builder();
+ params.put("write_stream", String.valueOf(request.getWriteStream()));
+ return params.build();
+ }
+ })
+ .build();
this.createWriteStreamCallable =
callableFactory.createUnaryCallable(
@@ -234,6 +258,9 @@ public Map extract(BatchCommitWriteStreamsRequest request) {
batchCommitWriteStreamsTransportSettings,
settings.batchCommitWriteStreamsSettings(),
clientContext);
+ this.flushRowsCallable =
+ callableFactory.createUnaryCallable(
+ flushRowsTransportSettings, settings.flushRowsSettings(), clientContext);
backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources());
}
@@ -260,6 +287,10 @@ public UnaryCallable getWriteStreamCallable(
return batchCommitWriteStreamsCallable;
}
+ public UnaryCallable<FlushRowsRequest, FlushRowsResponse> flushRowsCallable() {
+ return flushRowsCallable;
+ }
+
@Override
public final void close() {
shutdown();
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClientTest.java
index 2c990f039b..f035c493f5 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClientTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClientTest.java
@@ -33,6 +33,8 @@
import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream;
import com.google.protobuf.AbstractMessage;
@@ -307,4 +309,45 @@ public void batchCommitWriteStreamsExceptionTest() throws Exception {
// Expected exception
}
}
+
+ @Test
+ @SuppressWarnings("all")
+ public void flushRowsTest() {
+ long offset = 1019779949L;
+ FlushRowsResponse expectedResponse = FlushRowsResponse.newBuilder().setOffset(offset).build();
+ mockBigQueryWrite.addResponse(expectedResponse);
+
+ WriteStreamName writeStream =
+ WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+
+ FlushRowsResponse actualResponse = client.flushRows(writeStream);
+ Assert.assertEquals(expectedResponse, actualResponse);
+
+ List<AbstractMessage> actualRequests = mockBigQueryWrite.getRequests();
+ Assert.assertEquals(1, actualRequests.size());
+ FlushRowsRequest actualRequest = (FlushRowsRequest) actualRequests.get(0);
+
+ Assert.assertEquals(writeStream, WriteStreamName.parse(actualRequest.getWriteStream()));
+ Assert.assertTrue(
+ channelProvider.isHeaderSent(
+ ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+ GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+ }
+
+ @Test
+ @SuppressWarnings("all")
+ public void flushRowsExceptionTest() throws Exception {
+ StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
+ mockBigQueryWrite.addException(exception);
+
+ try {
+ WriteStreamName writeStream =
+ WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+
+ client.flushRows(writeStream);
+ Assert.fail("No exception raised");
+ } catch (InvalidArgumentException e) {
+ // Expected exception
+ }
+ }
}
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWriteImpl.java
index a82a3dbdb3..ecc8e99e05 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWriteImpl.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWriteImpl.java
@@ -24,6 +24,8 @@
import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest;
+import com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse;
import com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest;
import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream;
import com.google.protobuf.AbstractMessage;
@@ -157,4 +159,19 @@ public void batchCommitWriteStreams(
responseObserver.onError(new IllegalArgumentException("Unrecognized response type"));
}
}
+
+ @Override
+ public void flushRows(
+ FlushRowsRequest request, StreamObserver<FlushRowsResponse> responseObserver) {
+ Object response = responses.remove();
+ if (response instanceof FlushRowsResponse) {
+ requests.add(request);
+ responseObserver.onNext((FlushRowsResponse) response);
+ responseObserver.onCompleted();
+ } else if (response instanceof Exception) {
+ responseObserver.onError((Exception) response);
+ } else {
+ responseObserver.onError(new IllegalArgumentException("Unrecognized response type"));
+ }
+ }
}
diff --git a/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java b/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java
index 87d16ed0b7..cd9ae52f4e 100644
--- a/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java
+++ b/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java
@@ -291,6 +291,52 @@ private BigQueryWriteGrpc() {}
return getBatchCommitWriteStreamsMethod;
}
+ private static volatile io.grpc.MethodDescriptor<
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest,
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse>
+ getFlushRowsMethod;
+
+ @io.grpc.stub.annotations.RpcMethod(
+ fullMethodName = SERVICE_NAME + '/' + "FlushRows",
+ requestType = com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest.class,
+ responseType = com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse.class,
+ methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
+ public static io.grpc.MethodDescriptor<
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest,
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse>
+ getFlushRowsMethod() {
+ io.grpc.MethodDescriptor<
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest,
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse>
+ getFlushRowsMethod;
+ if ((getFlushRowsMethod = BigQueryWriteGrpc.getFlushRowsMethod) == null) {
+ synchronized (BigQueryWriteGrpc.class) {
+ if ((getFlushRowsMethod = BigQueryWriteGrpc.getFlushRowsMethod) == null) {
+ BigQueryWriteGrpc.getFlushRowsMethod =
+ getFlushRowsMethod =
+ io.grpc.MethodDescriptor
+ .<com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest, com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse>
+ newBuilder()
+ .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
+ .setFullMethodName(generateFullMethodName(SERVICE_NAME, "FlushRows"))
+ .setSampledToLocalTracing(true)
+ .setRequestMarshaller(
+ io.grpc.protobuf.ProtoUtils.marshaller(
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest
+ .getDefaultInstance()))
+ .setResponseMarshaller(
+ io.grpc.protobuf.ProtoUtils.marshaller(
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse
+ .getDefaultInstance()))
+ .setSchemaDescriptor(new BigQueryWriteMethodDescriptorSupplier("FlushRows"))
+ .build();
+ }
+ }
+ }
+ return getFlushRowsMethod;
+ }
+
/** Creates a new async stub that supports all call types for the service */
public static BigQueryWriteStub newStub(io.grpc.Channel channel) {
io.grpc.stub.AbstractStub.StubFactory factory =
@@ -436,6 +482,25 @@ public void batchCommitWriteStreams(
asyncUnimplementedUnaryCall(getBatchCommitWriteStreamsMethod(), responseObserver);
}
+ /**
+ *
+ *
+ * <pre>
+ * Flushes rows to a BUFFERED stream.
+ * If users are appending rows to BUFFERED stream, flush operation is
+ * required in order for the rows to become available for reading. A
+ * Flush operation flushes up to any previously flushed offset in a BUFFERED
+ * stream, to the offset specified in the request.
+ * </pre>
+ */
+ public void flushRows(
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest request,
+ io.grpc.stub.StreamObserver<
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse>
+ responseObserver) {
+ asyncUnimplementedUnaryCall(getFlushRowsMethod(), responseObserver);
+ }
+
@java.lang.Override
public final io.grpc.ServerServiceDefinition bindService() {
return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
@@ -476,6 +541,13 @@ public final io.grpc.ServerServiceDefinition bindService() {
com.google.cloud.bigquery.storage.v1alpha2.Storage
.BatchCommitWriteStreamsResponse>(
this, METHODID_BATCH_COMMIT_WRITE_STREAMS)))
+ .addMethod(
+ getFlushRowsMethod(),
+ asyncUnaryCall(
+ new MethodHandlers<
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest,
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse>(
+ this, METHODID_FLUSH_ROWS)))
.build();
}
}
@@ -605,6 +677,26 @@ public void batchCommitWriteStreams(
request,
responseObserver);
}
+
+ /**
+ *
+ *
+ * <pre>
+ * Flushes rows to a BUFFERED stream.
+ * If users are appending rows to BUFFERED stream, flush operation is
+ * required in order for the rows to become available for reading. A
+ * Flush operation flushes up to any previously flushed offset in a BUFFERED
+ * stream, to the offset specified in the request.
+ * </pre>
+ */
+ public void flushRows(
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest request,
+ io.grpc.stub.StreamObserver<
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse>
+ responseObserver) {
+ asyncUnaryCall(
+ getChannel().newCall(getFlushRowsMethod(), getCallOptions()), request, responseObserver);
+ }
}
/**
@@ -685,6 +777,22 @@ public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream getWriteStr
return blockingUnaryCall(
getChannel(), getBatchCommitWriteStreamsMethod(), getCallOptions(), request);
}
+
+ /**
+ *
+ *
+ * <pre>
+ * Flushes rows to a BUFFERED stream.
+ * If users are appending rows to BUFFERED stream, flush operation is
+ * required in order for the rows to become available for reading. A
+ * Flush operation flushes up to any previously flushed offset in a BUFFERED
+ * stream, to the offset specified in the request.
+ * </pre>
+ */
+ public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse flushRows(
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest request) {
+ return blockingUnaryCall(getChannel(), getFlushRowsMethod(), getCallOptions(), request);
+ }
}
/**
@@ -772,13 +880,31 @@ protected BigQueryWriteFutureStub build(
return futureUnaryCall(
getChannel().newCall(getBatchCommitWriteStreamsMethod(), getCallOptions()), request);
}
+
+ /**
+ *
+ *
+ * <pre>
+ * Flushes rows to a BUFFERED stream.
+ * If users are appending rows to BUFFERED stream, flush operation is
+ * required in order for the rows to become available for reading. A
+ * Flush operation flushes up to any previously flushed offset in a BUFFERED
+ * stream, to the offset specified in the request.
+ * </pre>
+ */
+ public com.google.common.util.concurrent.ListenableFuture<
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse>
+ flushRows(com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest request) {
+ return futureUnaryCall(getChannel().newCall(getFlushRowsMethod(), getCallOptions()), request);
+ }
}
private static final int METHODID_CREATE_WRITE_STREAM = 0;
private static final int METHODID_GET_WRITE_STREAM = 1;
private static final int METHODID_FINALIZE_WRITE_STREAM = 2;
private static final int METHODID_BATCH_COMMIT_WRITE_STREAMS = 3;
- private static final int METHODID_APPEND_ROWS = 4;
+ private static final int METHODID_FLUSH_ROWS = 4;
+ private static final int METHODID_APPEND_ROWS = 5;
private static final class MethodHandlers
implements io.grpc.stub.ServerCalls.UnaryMethod,
@@ -829,6 +955,13 @@ public void invoke(Req request, io.grpc.stub.StreamObserver responseObserv
.BatchCommitWriteStreamsResponse>)
responseObserver);
break;
+ case METHODID_FLUSH_ROWS:
+ serviceImpl.flushRows(
+ (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest) request,
+ (io.grpc.stub.StreamObserver<
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse>)
+ responseObserver);
+ break;
default:
throw new AssertionError();
}
@@ -904,6 +1037,7 @@ public static io.grpc.ServiceDescriptor getServiceDescriptor() {
.addMethod(getGetWriteStreamMethod())
.addMethod(getFinalizeWriteStreamMethod())
.addMethod(getBatchCommitWriteStreamsMethod())
+ .addMethod(getFlushRowsMethod())
.build();
}
}
diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java
index b0d8882f05..b69d92dd6c 100644
--- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java
+++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java
@@ -8470,6 +8470,1353 @@ public com.google.protobuf.Parser getParserForType(
}
}
+ public interface FlushRowsRequestOrBuilder
+ extends
+ // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ *
+ *
+ *
+ * Required. The stream that is the target of the flush operation.
+ *
+ *
+ *
+ * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return The writeStream.
+ */
+ java.lang.String getWriteStream();
+ /**
+ *
+ *
+ *
+ * Required. The stream that is the target of the flush operation.
+ *
+ *
+ *
+ * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return The bytes for writeStream.
+ */
+ com.google.protobuf.ByteString getWriteStreamBytes();
+
+ /**
+ *
+ *
+ *
+     * Ending offset of the flush operation. Rows before this offset (including
+ * this offset) will be flushed.
+ *
+ *
+ * int64 offset = 2;
+ *
+ * @return The offset.
+ */
+ long getOffset();
+ }
+ /**
+ *
+ *
+ *
+ * Request message for `FlushRows`.
+ *
+ *
+ * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest}
+ */
+ public static final class FlushRowsRequest extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest)
+ FlushRowsRequestOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use FlushRowsRequest.newBuilder() to construct.
+ private FlushRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder> builder) {
+ super(builder);
+ }
+
+ private FlushRowsRequest() {
+ writeStream_ = "";
+ }
+
+ @java.lang.Override
+ @SuppressWarnings({"unused"})
+ protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+ return new FlushRowsRequest();
+ }
+
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
+ return this.unknownFields;
+ }
+
+ private FlushRowsRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ case 10:
+ {
+ java.lang.String s = input.readStringRequireUtf8();
+
+ writeStream_ = s;
+ break;
+ }
+ case 16:
+ {
+ offset_ = input.readInt64();
+ break;
+ }
+ default:
+ {
+ if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.bigquery.storage.v1alpha2.Storage
+ .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.bigquery.storage.v1alpha2.Storage
+ .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest.class,
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest.Builder.class);
+ }
+
+ public static final int WRITE_STREAM_FIELD_NUMBER = 1;
+ private volatile java.lang.Object writeStream_;
+ /**
+ *
+ *
+ *
+ * Required. The stream that is the target of the flush operation.
+ *
+ *
+ *
+ * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return The writeStream.
+ */
+ public java.lang.String getWriteStream() {
+ java.lang.Object ref = writeStream_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ writeStream_ = s;
+ return s;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Required. The stream that is the target of the flush operation.
+ *
+ *
+ *
+ * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return The bytes for writeStream.
+ */
+ public com.google.protobuf.ByteString getWriteStreamBytes() {
+ java.lang.Object ref = writeStream_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ writeStream_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int OFFSET_FIELD_NUMBER = 2;
+ private long offset_;
+ /**
+ *
+ *
+ *
+     * Ending offset of the flush operation. Rows before this offset (including
+ * this offset) will be flushed.
+ *
+ *
+ * int64 offset = 2;
+ *
+ * @return The offset.
+ */
+ public long getOffset() {
+ return offset_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (!getWriteStreamBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, writeStream_);
+ }
+ if (offset_ != 0L) {
+ output.writeInt64(2, offset_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (!getWriteStreamBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, writeStream_);
+ }
+ if (offset_ != 0L) {
+ size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, offset_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest other =
+ (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest) obj;
+
+ if (!getWriteStream().equals(other.getWriteStream())) return false;
+ if (getOffset() != other.getOffset()) return false;
+ if (!unknownFields.equals(other.unknownFields)) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER;
+ hash = (53 * hash) + getWriteStream().hashCode();
+ hash = (37 * hash) + OFFSET_FIELD_NUMBER;
+ hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOffset());
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom(
+ byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest
+ parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest
+ parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ *
+ * Request message for `FlushRows`.
+ *
+ *
+ * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest}
+ */
+ public static final class Builder
+ extends com.google.protobuf.GeneratedMessageV3.Builder
+ implements
+ // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest)
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.bigquery.storage.v1alpha2.Storage
+ .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.bigquery.storage.v1alpha2.Storage
+ .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest.class,
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest.Builder.class);
+ }
+
+ // Construct using
+ // com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
+ }
+
+ @java.lang.Override
+ public Builder clear() {
+ super.clear();
+ writeStream_ = "";
+
+ offset_ = 0L;
+
+ return this;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+ return com.google.cloud.bigquery.storage.v1alpha2.Storage
+ .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_descriptor;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest
+ getDefaultInstanceForType() {
+ return com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest
+ .getDefaultInstance();
+ }
+
+ @java.lang.Override
+ public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest build() {
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest buildPartial() {
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest result =
+ new com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest(this);
+ result.writeStream_ = writeStream_;
+ result.offset_ = offset_;
+ onBuilt();
+ return result;
+ }
+
+ @java.lang.Override
+ public Builder clone() {
+ return super.clone();
+ }
+
+ @java.lang.Override
+ public Builder setField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.setField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return super.clearField(field);
+ }
+
+ @java.lang.Override
+ public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return super.clearOneof(oneof);
+ }
+
+ @java.lang.Override
+ public Builder setRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field,
+ int index,
+ java.lang.Object value) {
+ return super.setRepeatedField(field, index, value);
+ }
+
+ @java.lang.Override
+ public Builder addRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.addRepeatedField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest) {
+ return mergeFrom(
+ (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest) other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest other) {
+ if (other
+ == com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest
+ .getDefaultInstance()) return this;
+ if (!other.getWriteStream().isEmpty()) {
+ writeStream_ = other.writeStream_;
+ onChanged();
+ }
+ if (other.getOffset() != 0L) {
+ setOffset(other.getOffset());
+ }
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage =
+ (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest)
+ e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ private java.lang.Object writeStream_ = "";
+ /**
+ *
+ *
+ *
+ * Required. The stream that is the target of the flush operation.
+ *
+ *
+ *
+ * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return The writeStream.
+ */
+ public java.lang.String getWriteStream() {
+ java.lang.Object ref = writeStream_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ writeStream_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Required. The stream that is the target of the flush operation.
+ *
+ *
+ *
+ * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return The bytes for writeStream.
+ */
+ public com.google.protobuf.ByteString getWriteStreamBytes() {
+ java.lang.Object ref = writeStream_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ writeStream_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Required. The stream that is the target of the flush operation.
+ *
+ *
+ *
+ * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @param value The writeStream to set.
+ * @return This builder for chaining.
+ */
+ public Builder setWriteStream(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ writeStream_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The stream that is the target of the flush operation.
+ *
+ *
+ *
+ * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearWriteStream() {
+
+ writeStream_ = getDefaultInstance().getWriteStream();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The stream that is the target of the flush operation.
+ *
+ *
+ *
+ * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @param value The bytes for writeStream to set.
+ * @return This builder for chaining.
+ */
+ public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ writeStream_ = value;
+ onChanged();
+ return this;
+ }
+
+ private long offset_;
+ /**
+ *
+ *
+ *
+       * Ending offset of the flush operation. Rows before this offset (including
+ * this offset) will be flushed.
+ *
+ *
+ * int64 offset = 2;
+ *
+ * @return The offset.
+ */
+ public long getOffset() {
+ return offset_;
+ }
+ /**
+ *
+ *
+ *
+       * Ending offset of the flush operation. Rows before this offset (including
+ * this offset) will be flushed.
+ *
+ *
+ * int64 offset = 2;
+ *
+ * @param value The offset to set.
+ * @return This builder for chaining.
+ */
+ public Builder setOffset(long value) {
+
+ offset_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+       * Ending offset of the flush operation. Rows before this offset (including
+ * this offset) will be flushed.
+ *
+ *
+ * int64 offset = 2;
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearOffset() {
+
+ offset_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest)
+ private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest
+ DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest();
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest
+ getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ @java.lang.Override
+ public FlushRowsRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new FlushRowsRequest(input, extensionRegistry);
+ }
+ };
+
+ public static com.google.protobuf.Parser parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest
+ getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+ }
+
+ public interface FlushRowsResponseOrBuilder
+ extends
+ // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ *
+ *
+ *
+ * The rows before this offset (including this offset) are flushed.
+ *
+ *
+ * int64 offset = 1;
+ *
+ * @return The offset.
+ */
+ long getOffset();
+ }
+ /**
+ *
+ *
+ *
+   * Response message for `FlushRows`.
+ *
+ *
+ * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse}
+ */
+ public static final class FlushRowsResponse extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse)
+ FlushRowsResponseOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use FlushRowsResponse.newBuilder() to construct.
+ private FlushRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder> builder) {
+ super(builder);
+ }
+
+ private FlushRowsResponse() {}
+
+ @java.lang.Override
+ @SuppressWarnings({"unused"})
+ protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+ return new FlushRowsResponse();
+ }
+
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
+ return this.unknownFields;
+ }
+
+ private FlushRowsResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ case 8:
+ {
+ offset_ = input.readInt64();
+ break;
+ }
+ default:
+ {
+ if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.bigquery.storage.v1alpha2.Storage
+ .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.bigquery.storage.v1alpha2.Storage
+ .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse.class,
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse.Builder.class);
+ }
+
+ public static final int OFFSET_FIELD_NUMBER = 1;
+ private long offset_;
+ /**
+ *
+ *
+ *
+ * The rows before this offset (including this offset) are flushed.
+ *
+ *
+ * int64 offset = 1;
+ *
+ * @return The offset.
+ */
+ public long getOffset() {
+ return offset_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (offset_ != 0L) {
+ output.writeInt64(1, offset_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (offset_ != 0L) {
+ size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, offset_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse other =
+ (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse) obj;
+
+ if (getOffset() != other.getOffset()) return false;
+ if (!unknownFields.equals(other.unknownFields)) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + OFFSET_FIELD_NUMBER;
+ hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOffset());
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom(
+ byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse
+ parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse
+ parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ *
+     * Response message for `FlushRows`.
+ *
+ *
+ * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse}
+ */
+ public static final class Builder
+ extends com.google.protobuf.GeneratedMessageV3.Builder
+ implements
+ // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse)
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.bigquery.storage.v1alpha2.Storage
+ .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.bigquery.storage.v1alpha2.Storage
+ .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse.class,
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse.Builder.class);
+ }
+
+ // Construct using
+ // com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
+ }
+
+ @java.lang.Override
+ public Builder clear() {
+ super.clear();
+ offset_ = 0L;
+
+ return this;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+ return com.google.cloud.bigquery.storage.v1alpha2.Storage
+ .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_descriptor;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse
+ getDefaultInstanceForType() {
+ return com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse
+ .getDefaultInstance();
+ }
+
+ @java.lang.Override
+ public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse build() {
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse result =
+ buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse buildPartial() {
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse result =
+ new com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse(this);
+ result.offset_ = offset_;
+ onBuilt();
+ return result;
+ }
+
+ @java.lang.Override
+ public Builder clone() {
+ return super.clone();
+ }
+
+ @java.lang.Override
+ public Builder setField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.setField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return super.clearField(field);
+ }
+
+ @java.lang.Override
+ public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return super.clearOneof(oneof);
+ }
+
+ @java.lang.Override
+ public Builder setRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field,
+ int index,
+ java.lang.Object value) {
+ return super.setRepeatedField(field, index, value);
+ }
+
+ @java.lang.Override
+ public Builder addRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.addRepeatedField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse) {
+ return mergeFrom(
+ (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse) other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse other) {
+ if (other
+ == com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse
+ .getDefaultInstance()) return this;
+ if (other.getOffset() != 0L) {
+ setOffset(other.getOffset());
+ }
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage =
+ (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse)
+ e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ private long offset_;
+ /**
+ *
+ *
+ *
+ * The rows before this offset (including this offset) are flushed.
+ *
+ *
+ * int64 offset = 1;
+ *
+ * @return The offset.
+ */
+ public long getOffset() {
+ return offset_;
+ }
+ /**
+ *
+ *
+ *
+ * The rows before this offset (including this offset) are flushed.
+ *
+ *
+ * int64 offset = 1;
+ *
+ * @param value The offset to set.
+ * @return This builder for chaining.
+ */
+ public Builder setOffset(long value) {
+
+ offset_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The rows before this offset (including this offset) are flushed.
+ *
+ *
+ * int64 offset = 1;
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearOffset() {
+
+ offset_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse)
+ private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse
+ DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse();
+ }
+
+ public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse
+ getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ @java.lang.Override
+ public FlushRowsResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new FlushRowsResponse(input, extensionRegistry);
+ }
+ };
+
+ public static com.google.protobuf.Parser parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse
+ getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+ }
+
private static final com.google.protobuf.Descriptors.Descriptor
internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_descriptor;
private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
@@ -8506,6 +9853,14 @@ public com.google.protobuf.Parser getParserForType(
internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_descriptor;
private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_fieldAccessorTable;
+ private static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_descriptor;
+ private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_fieldAccessorTable;
+ private static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_descriptor;
+ private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
return descriptor;
@@ -8554,45 +9909,54 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "mestamp\"^\n\032FinalizeWriteStreamRequest\022@\n"
+ "\004name\030\001 \001(\tB2\340A\002\372A,\n*bigquerystorage.goo"
+ "gleapis.com/WriteStream\"0\n\033FinalizeWrite"
- + "StreamResponse\022\021\n\trow_count\030\001 \001(\0032\310\n\n\rBi"
- + "gQueryWrite\022\351\001\n\021CreateWriteStream\022@.goog"
- + "le.cloud.bigquery.storage.v1alpha2.Creat"
- + "eWriteStreamRequest\0323.google.cloud.bigqu"
- + "ery.storage.v1alpha2.WriteStream\"]\202\323\344\223\002A"
- + "\"1/v1alpha2/{parent=projects/*/datasets/"
- + "*/tables/*}:\014write_stream\332A\023parent,write"
- + "_stream\022\344\001\n\nAppendRows\0229.google.cloud.bi"
- + "gquery.storage.v1alpha2.AppendRowsReques"
- + "t\032:.google.cloud.bigquery.storage.v1alph"
- + "a2.AppendRowsResponse\"[\202\323\344\223\002F\"A/v1alpha2"
- + "/{write_stream=projects/*/datasets/*/tab"
- + "les/*/streams/*}:\001*\332A\014write_stream(\0010\001\022\321"
- + "\001\n\016GetWriteStream\022=.google.cloud.bigquer"
- + "y.storage.v1alpha2.GetWriteStreamRequest"
- + "\0323.google.cloud.bigquery.storage.v1alpha"
- + "2.WriteStream\"K\202\323\344\223\002>\"9/v1alpha2/{name=p"
- + "rojects/*/datasets/*/tables/*/streams/*}"
- + ":\001*\332A\004name\022\353\001\n\023FinalizeWriteStream\022B.goo"
- + "gle.cloud.bigquery.storage.v1alpha2.Fina"
- + "lizeWriteStreamRequest\032C.google.cloud.bi"
- + "gquery.storage.v1alpha2.FinalizeWriteStr"
- + "eamResponse\"K\202\323\344\223\002>\"9/v1alpha2/{name=pro"
- + "jects/*/datasets/*/tables/*/streams/*}:\001"
- + "*\332A\004name\022\356\001\n\027BatchCommitWriteStreams\022F.g"
- + "oogle.cloud.bigquery.storage.v1alpha2.Ba"
- + "tchCommitWriteStreamsRequest\032G.google.cl"
- + "oud.bigquery.storage.v1alpha2.BatchCommi"
- + "tWriteStreamsResponse\"B\202\323\344\223\0023\0221/v1alpha2"
- + "/{parent=projects/*/datasets/*/tables/*}"
- + "\332A\006parent\032\260\001\312A\036bigquerystorage.googleapi"
- + "s.com\322A\213\001https://www.googleapis.com/auth"
- + "/bigquery,https://www.googleapis.com/aut"
- + "h/bigquery.insertdata,https://www.google"
- + "apis.com/auth/cloud-platformB{\n*com.goog"
- + "le.cloud.bigquery.storage.v1alpha2ZMgoog"
- + "le.golang.org/genproto/googleapis/cloud/"
- + "bigquery/storage/v1alpha2;storageb\006proto"
- + "3"
+ + "StreamResponse\022\021\n\trow_count\030\001 \001(\003\"l\n\020Flu"
+ + "shRowsRequest\022H\n\014write_stream\030\001 \001(\tB2\340A\002"
+ + "\372A,\n*bigquerystorage.googleapis.com/Writ"
+ + "eStream\022\016\n\006offset\030\002 \001(\003\"#\n\021FlushRowsResp"
+ + "onse\022\016\n\006offset\030\001 \001(\0032\250\014\n\rBigQueryWrite\022\351"
+ + "\001\n\021CreateWriteStream\022@.google.cloud.bigq"
+ + "uery.storage.v1alpha2.CreateWriteStreamR"
+ + "equest\0323.google.cloud.bigquery.storage.v"
+ + "1alpha2.WriteStream\"]\202\323\344\223\002A\"1/v1alpha2/{"
+ + "parent=projects/*/datasets/*/tables/*}:\014"
+ + "write_stream\332A\023parent,write_stream\022\344\001\n\nA"
+ + "ppendRows\0229.google.cloud.bigquery.storag"
+ + "e.v1alpha2.AppendRowsRequest\032:.google.cl"
+ + "oud.bigquery.storage.v1alpha2.AppendRows"
+ + "Response\"[\202\323\344\223\002F\"A/v1alpha2/{write_strea"
+ + "m=projects/*/datasets/*/tables/*/streams"
+ + "/*}:\001*\332A\014write_stream(\0010\001\022\321\001\n\016GetWriteSt"
+ + "ream\022=.google.cloud.bigquery.storage.v1a"
+ + "lpha2.GetWriteStreamRequest\0323.google.clo"
+ + "ud.bigquery.storage.v1alpha2.WriteStream"
+ + "\"K\202\323\344\223\002>\"9/v1alpha2/{name=projects/*/dat"
+ + "asets/*/tables/*/streams/*}:\001*\332A\004name\022\353\001"
+ + "\n\023FinalizeWriteStream\022B.google.cloud.big"
+ + "query.storage.v1alpha2.FinalizeWriteStre"
+ + "amRequest\032C.google.cloud.bigquery.storag"
+ + "e.v1alpha2.FinalizeWriteStreamResponse\"K"
+ + "\202\323\344\223\002>\"9/v1alpha2/{name=projects/*/datas"
+ + "ets/*/tables/*/streams/*}:\001*\332A\004name\022\356\001\n\027"
+ + "BatchCommitWriteStreams\022F.google.cloud.b"
+ + "igquery.storage.v1alpha2.BatchCommitWrit"
+ + "eStreamsRequest\032G.google.cloud.bigquery."
+ + "storage.v1alpha2.BatchCommitWriteStreams"
+ + "Response\"B\202\323\344\223\0023\0221/v1alpha2/{parent=proj"
+ + "ects/*/datasets/*/tables/*}\332A\006parent\022\335\001\n"
+ + "\tFlushRows\0228.google.cloud.bigquery.stora"
+ + "ge.v1alpha2.FlushRowsRequest\0329.google.cl"
+ + "oud.bigquery.storage.v1alpha2.FlushRowsR"
+ + "esponse\"[\202\323\344\223\002F\"A/v1alpha2/{write_stream"
+ + "=projects/*/datasets/*/tables/*/streams/"
+ + "*}:\001*\332A\014write_stream\032\260\001\312A\036bigquerystorag"
+ + "e.googleapis.com\322A\213\001https://www.googleap"
+ + "is.com/auth/bigquery,https://www.googlea"
+ + "pis.com/auth/bigquery.insertdata,https:/"
+ + "/www.googleapis.com/auth/cloud-platformB"
+ + "{\n*com.google.cloud.bigquery.storage.v1a"
+ + "lpha2ZMgoogle.golang.org/genproto/google"
+ + "apis/cloud/bigquery/storage/v1alpha2;sto"
+ + "rageb\006proto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -8684,6 +10048,22 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
new java.lang.String[] {
"RowCount",
});
+ internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_descriptor =
+ getDescriptor().getMessageTypes().get(8);
+ internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_descriptor,
+ new java.lang.String[] {
+ "WriteStream", "Offset",
+ });
+ internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_descriptor =
+ getDescriptor().getMessageTypes().get(9);
+ internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_descriptor,
+ new java.lang.String[] {
+ "Offset",
+ });
com.google.protobuf.ExtensionRegistry registry =
com.google.protobuf.ExtensionRegistry.newInstance();
registry.add(com.google.api.ClientProto.defaultHost);
diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto
index fd13ba3a12..e9a893b993 100644
--- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto
+++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto
@@ -46,6 +46,97 @@ message CreateWriteStreamRequest {
WriteStream write_stream = 2 [(google.api.field_behavior) = REQUIRED];
}
+// BigQuery Write API.
+//
+// The Write API can be used to write data to BigQuery.
+service BigQueryWrite {
+ option (google.api.default_host) = "bigquerystorage.googleapis.com";
+ option (google.api.oauth_scopes) =
+ "https://www.googleapis.com/auth/bigquery,"
+ "https://www.googleapis.com/auth/bigquery.insertdata,"
+ "https://www.googleapis.com/auth/cloud-platform";
+
+ // Creates a write stream to the given table.
+ rpc CreateWriteStream(CreateWriteStreamRequest) returns (WriteStream) {
+ option (google.api.http) = {
+ post: "/v1alpha2/{parent=projects/*/datasets/*/tables/*}"
+ body: "write_stream"
+ };
+ option (google.api.method_signature) = "parent,write_stream";
+ }
+
+ // Appends data to the given stream.
+ //
+ // If `offset` is specified, the `offset` is checked against the end of
+ // stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+ // attempt is made to append to an offset beyond the current end of the stream
+  // or `ALREADY_EXISTS` if user provides an `offset` that has already been
+ // written to. User can retry with adjusted offset within the same RPC
+ // stream. If `offset` is not specified, append happens at the end of the
+ // stream.
+ //
+ // The response contains the offset at which the append happened. Responses
+ // are received in the same order in which requests are sent. There will be
+ // one response for each successful request. If the `offset` is not set in
+ // response, it means append didn't happen due to some errors. If one request
+ // fails, all the subsequent requests will also fail until a success request
+ // is made again.
+ //
+ // If the stream is of `PENDING` type, data will only be available for read
+ // operations after the stream is committed.
+ rpc AppendRows(stream AppendRowsRequest) returns (stream AppendRowsResponse) {
+ option (google.api.http) = {
+ post: "/v1alpha2/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
+ body: "*"
+ };
+ option (google.api.method_signature) = "write_stream";
+ }
+
+ // Gets a write stream.
+ rpc GetWriteStream(GetWriteStreamRequest) returns (WriteStream) {
+ option (google.api.http) = {
+ post: "/v1alpha2/{name=projects/*/datasets/*/tables/*/streams/*}"
+ body: "*"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Finalize a write stream so that no new data can be appended to the
+ // stream.
+ rpc FinalizeWriteStream(FinalizeWriteStreamRequest) returns (FinalizeWriteStreamResponse) {
+ option (google.api.http) = {
+ post: "/v1alpha2/{name=projects/*/datasets/*/tables/*/streams/*}"
+ body: "*"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Atomically commits a group of `PENDING` streams that belong to the same
+ // `parent` table.
+ // Streams must be finalized before commit and cannot be committed multiple
+ // times. Once a stream is committed, data in the stream becomes available
+ // for read operations.
+ rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest) returns (BatchCommitWriteStreamsResponse) {
+ option (google.api.http) = {
+ get: "/v1alpha2/{parent=projects/*/datasets/*/tables/*}"
+ };
+ option (google.api.method_signature) = "parent";
+ }
+
+ // Flushes rows to a BUFFERED stream.
+ // If users are appending rows to BUFFERED stream, flush operation is
+ // required in order for the rows to become available for reading. A
+ // Flush operation flushes up to any previously flushed offset in a BUFFERED
+ // stream, to the offset specified in the request.
+ rpc FlushRows(FlushRowsRequest) returns (FlushRowsResponse) {
+ option (google.api.http) = {
+ post: "/v1alpha2/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
+ body: "*"
+ };
+ option (google.api.method_signature) = "write_stream";
+ }
+}
+
// Request message for `AppendRows`.
message AppendRowsRequest {
message ProtoData {
@@ -143,81 +234,23 @@ message FinalizeWriteStreamResponse {
int64 row_count = 1;
}
-// BigQuery Write API.
-//
-// The Write API can be used to write data to BigQuery.
-service BigQueryWrite {
- option (google.api.default_host) = "bigquerystorage.googleapis.com";
- option (google.api.oauth_scopes) =
- "https://www.googleapis.com/auth/bigquery,"
- "https://www.googleapis.com/auth/bigquery.insertdata,"
- "https://www.googleapis.com/auth/cloud-platform";
-
- // Creates a write stream to the given table.
- rpc CreateWriteStream(CreateWriteStreamRequest) returns (WriteStream) {
- option (google.api.http) = {
- post: "/v1alpha2/{parent=projects/*/datasets/*/tables/*}"
- body: "write_stream"
- };
- option (google.api.method_signature) = "parent,write_stream";
- }
-
- // Appends data to the given stream.
- //
- // If `offset` is specified, the `offset` is checked against the end of
- // stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
- // attempt is made to append to an offset beyond the current end of the stream
- // or `ALREADY_EXISTS` if user provids an `offset` that has already been
- // written to. User can retry with adjusted offset within the same RPC
- // stream. If `offset` is not specified, append happens at the end of the
- // stream.
- //
- // The response contains the offset at which the append happened. Responses
- // are received in the same order in which requests are sent. There will be
- // one response for each successful request. If the `offset` is not set in
- // response, it means append didn't happen due to some errors. If one request
- // fails, all the subsequent requests will also fail until a success request
- // is made again.
- //
- // If the stream is of `PENDING` type, data will only be available for read
- // operations after the stream is committed.
- rpc AppendRows(stream AppendRowsRequest) returns (stream AppendRowsResponse) {
- option (google.api.http) = {
- post: "/v1alpha2/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
- body: "*"
- };
- option (google.api.method_signature) = "write_stream";
- }
-
- // Gets a write stream.
- rpc GetWriteStream(GetWriteStreamRequest) returns (WriteStream) {
- option (google.api.http) = {
- post: "/v1alpha2/{name=projects/*/datasets/*/tables/*/streams/*}"
- body: "*"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Finalize a write stream so that no new data can be appended to the
- // stream.
- rpc FinalizeWriteStream(FinalizeWriteStreamRequest) returns (FinalizeWriteStreamResponse) {
- option (google.api.http) = {
- post: "/v1alpha2/{name=projects/*/datasets/*/tables/*/streams/*}"
- body: "*"
- };
- option (google.api.method_signature) = "name";
- }
+// Request message for `FlushRows`.
+message FlushRowsRequest {
+ // Required. The stream that is the target of the flush operation.
+ string write_stream = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "bigquerystorage.googleapis.com/WriteStream"
+ }
+ ];
- // Atomically commits a group of `PENDING` streams that belong to the same
- // `parent` table.
- // Streams must be finalized before commit and cannot be committed multiple
- // times. Once a stream is committed, data in the stream becomes available
- // for read operations.
- rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest) returns (BatchCommitWriteStreamsResponse) {
- option (google.api.http) = {
- get: "/v1alpha2/{parent=projects/*/datasets/*/tables/*}"
- };
- option (google.api.method_signature) = "parent";
- }
+  // Ending offset of the flush operation. Rows before this offset (including
+ // this offset) will be flushed.
+ int64 offset = 2;
+}
+// Response message for `FlushRows`.
+message FlushRowsResponse {
+ // The rows before this offset (including this offset) are flushed.
+ int64 offset = 1;
}
diff --git a/synth.metadata b/synth.metadata
index 0f40998f7d..84476fa9b6 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -11,39 +11,39 @@
"git": {
"name": ".",
"remote": "https://github.com/googleapis/java-bigquerystorage.git",
- "sha": "5955fdd0240d5a2ea2d105ea72033b388237e20c"
+ "sha": "d71e6b7166bc17579c33400c443ef7c5eec7ee8c"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "2fc2caaacb15949c7f80426bfc7dafdd41dbc333",
- "internalRef": "310239576"
+ "sha": "bf17ae5fd93929beb44ac4c6b04f5088c3ee4a02",
+ "internalRef": "311188524"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "2fc2caaacb15949c7f80426bfc7dafdd41dbc333",
- "internalRef": "310239576"
+ "sha": "bf17ae5fd93929beb44ac4c6b04f5088c3ee4a02",
+ "internalRef": "311188524"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "2fc2caaacb15949c7f80426bfc7dafdd41dbc333",
- "internalRef": "310239576"
+ "sha": "bf17ae5fd93929beb44ac4c6b04f5088c3ee4a02",
+ "internalRef": "311188524"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "2fc2caaacb15949c7f80426bfc7dafdd41dbc333",
- "internalRef": "310239576"
+ "sha": "bf17ae5fd93929beb44ac4c6b04f5088c3ee4a02",
+ "internalRef": "311188524"
}
},
{